8214527: ZGC for Aarch64

Implement ZGC for AArch64

Reviewed-by: aph, pliden, eosterlund, rkennke, shade, njian, adinn
Stuart Monteith 2019-06-13 15:24:34 +01:00
parent b4b823c4ae
commit 51eeaf9cb5
16 changed files with 2615 additions and 3 deletions

@@ -350,7 +350,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
# Only enable ZGC on supported platforms
AC_MSG_CHECKING([if zgc can be built])
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
(test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"); then
AC_MSG_RESULT([yes])
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"

@@ -957,6 +957,146 @@ reg_class v3_reg(
V3, V3_H
);
// Class for 128 bit register v4
reg_class v4_reg(
V4, V4_H
);
// Class for 128 bit register v5
reg_class v5_reg(
V5, V5_H
);
// Class for 128 bit register v6
reg_class v6_reg(
V6, V6_H
);
// Class for 128 bit register v7
reg_class v7_reg(
V7, V7_H
);
// Class for 128 bit register v8
reg_class v8_reg(
V8, V8_H
);
// Class for 128 bit register v9
reg_class v9_reg(
V9, V9_H
);
// Class for 128 bit register v10
reg_class v10_reg(
V10, V10_H
);
// Class for 128 bit register v11
reg_class v11_reg(
V11, V11_H
);
// Class for 128 bit register v12
reg_class v12_reg(
V12, V12_H
);
// Class for 128 bit register v13
reg_class v13_reg(
V13, V13_H
);
// Class for 128 bit register v14
reg_class v14_reg(
V14, V14_H
);
// Class for 128 bit register v15
reg_class v15_reg(
V15, V15_H
);
// Class for 128 bit register v16
reg_class v16_reg(
V16, V16_H
);
// Class for 128 bit register v17
reg_class v17_reg(
V17, V17_H
);
// Class for 128 bit register v18
reg_class v18_reg(
V18, V18_H
);
// Class for 128 bit register v19
reg_class v19_reg(
V19, V19_H
);
// Class for 128 bit register v20
reg_class v20_reg(
V20, V20_H
);
// Class for 128 bit register v21
reg_class v21_reg(
V21, V21_H
);
// Class for 128 bit register v22
reg_class v22_reg(
V22, V22_H
);
// Class for 128 bit register v23
reg_class v23_reg(
V23, V23_H
);
// Class for 128 bit register v24
reg_class v24_reg(
V24, V24_H
);
// Class for 128 bit register v25
reg_class v25_reg(
V25, V25_H
);
// Class for 128 bit register v26
reg_class v26_reg(
V26, V26_H
);
// Class for 128 bit register v27
reg_class v27_reg(
V27, V27_H
);
// Class for 128 bit register v28
reg_class v28_reg(
V28, V28_H
);
// Class for 128 bit register v29
reg_class v29_reg(
V29, V29_H
);
// Class for 128 bit register v30
reg_class v30_reg(
V30, V30_H
);
// Class for 128 bit register v31
reg_class v31_reg(
V31, V31_H
);
// Singleton class for condition codes
reg_class int_flags(RFLAGS);
@@ -4774,6 +4914,258 @@ operand vRegD_V3()
interface(REG_INTER);
%}
operand vRegD_V4()
%{
constraint(ALLOC_IN_RC(v4_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V5()
%{
constraint(ALLOC_IN_RC(v5_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V6()
%{
constraint(ALLOC_IN_RC(v6_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V7()
%{
constraint(ALLOC_IN_RC(v7_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V8()
%{
constraint(ALLOC_IN_RC(v8_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V9()
%{
constraint(ALLOC_IN_RC(v9_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V10()
%{
constraint(ALLOC_IN_RC(v10_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V11()
%{
constraint(ALLOC_IN_RC(v11_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V12()
%{
constraint(ALLOC_IN_RC(v12_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V13()
%{
constraint(ALLOC_IN_RC(v13_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V14()
%{
constraint(ALLOC_IN_RC(v14_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V15()
%{
constraint(ALLOC_IN_RC(v15_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V16()
%{
constraint(ALLOC_IN_RC(v16_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V17()
%{
constraint(ALLOC_IN_RC(v17_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V18()
%{
constraint(ALLOC_IN_RC(v18_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V19()
%{
constraint(ALLOC_IN_RC(v19_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V20()
%{
constraint(ALLOC_IN_RC(v20_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V21()
%{
constraint(ALLOC_IN_RC(v21_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V22()
%{
constraint(ALLOC_IN_RC(v22_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V23()
%{
constraint(ALLOC_IN_RC(v23_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V24()
%{
constraint(ALLOC_IN_RC(v24_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V25()
%{
constraint(ALLOC_IN_RC(v25_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V26()
%{
constraint(ALLOC_IN_RC(v26_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V27()
%{
constraint(ALLOC_IN_RC(v27_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V28()
%{
constraint(ALLOC_IN_RC(v28_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V29()
%{
constraint(ALLOC_IN_RC(v29_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V30()
%{
constraint(ALLOC_IN_RC(v30_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V31()
%{
constraint(ALLOC_IN_RC(v31_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Flags register, used as output of signed compare instructions
// note that on AArch64 we also use this register as the output for

@@ -1015,7 +1015,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
__ verify_oop(dest->as_register());
if (!UseZGC) {
// Load barrier has not yet been applied, so ZGC can't verify the oop here
__ verify_oop(dest->as_register());
}
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
@@ -2869,7 +2873,11 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
if (patch_code != lir_patch_none) {
deoptimize_trap(info);
return;
}
__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}

@@ -0,0 +1,407 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#include "gc/z/zThreadLocalData.hpp"
ZBarrierSetAssembler::ZBarrierSetAssembler() :
_load_barrier_slow_stub(),
_load_barrier_weak_slow_stub() {}
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#undef __
#define __ masm->
void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread) {
if (!ZBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
// rscratch1 can be passed as src or dst, so don't use it.
RegSet savedRegs = RegSet::of(rscratch2, rheapbase);
Label done;
assert_different_registers(rheapbase, rscratch2, dst);
assert_different_registers(rheapbase, rscratch2, src.base());
__ push(savedRegs, sp);
// Load bad mask into scratch register.
__ ldr(rheapbase, address_bad_mask_from_thread(rthread));
__ lea(rscratch2, src);
__ ldr(dst, src);
// Test reference against bad mask. If mask bad, then we need to fix it up.
__ tst(dst, rheapbase);
__ br(Assembler::EQ, done);
__ enter();
__ push(RegSet::range(r0,r28) - RegSet::of(dst), sp);
if (c_rarg0 != dst) {
__ mov(c_rarg0, dst);
}
__ mov(c_rarg1, rscratch2);
int step = 4 * wordSize;
__ mov(rscratch1, -step);
__ sub(sp, sp, step);
for (int i = 28; i >= 4; i -= 4) {
__ st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
as_FloatRegister(i+3), __ T1D, Address(__ post(sp, rscratch1)));
}
// Save v0-v3 as well; they land at the final sp after the loop above.
__ st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
as_FloatRegister(3), __ T1D, Address(sp));
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
for (int i = 0; i <= 28; i += 4) {
__ ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
as_FloatRegister(i+3), __ T1D, Address(__ post(sp, step)));
}
// Make sure dst has the return value.
if (dst != r0) {
__ mov(dst, r0);
}
__ pop(RegSet::range(r0,r28) - RegSet::of(dst), sp);
__ leave();
__ bind(done);
// Restore tmps
__ pop(savedRegs, sp);
}
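
For orientation, the fast path emitted above boils down to a single AND against the per-thread bad mask: only an oop with a bad color bit set takes the slow path. A minimal C++ sketch of that logic, using illustrative stand-ins (zgc_bad_mask, heal_slow) rather than names from the patch:

  #include <cstdint>

  static uintptr_t zgc_bad_mask;  // stand-in for the mask in ZThreadLocalData

  static uintptr_t heal_slow(uintptr_t* field, uintptr_t oop) {
    // Stand-in for the ZBarrierSetRuntime call: heal *field, return the good oop.
    return oop;
  }

  static uintptr_t load_oop_field(uintptr_t* field) {
    uintptr_t oop = *field;               // ldr dst, src
    if ((oop & zgc_bad_mask) == 0) {      // tst dst, mask; br EQ, done
      return oop;                         // fast path: pointer is already good
    }
    return heal_slow(field, oop);         // slow path: fix up the reference
  }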
#ifdef ASSERT
void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2) {
// Verify value
if (type == T_OBJECT || type == T_ARRAY) {
// Note that src could be noreg, which means we
// are storing null and can skip verification.
if (val != noreg) {
Label done;
// tmp1 and tmp2 are often set to noreg.
RegSet savedRegs = RegSet::of(rscratch1);
__ push(savedRegs, sp);
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
__ tst(val, rscratch1);
__ br(Assembler::EQ, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
__ pop(savedRegs, sp);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
}
#endif // ASSERT
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs) {
if (!is_oop) {
// Barrier not needed
return;
}
BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
assert_different_registers(src, count, rscratch1);
__ pusha();
if (count == c_rarg0) {
if (src == c_rarg1) {
// exactly backwards!!
__ mov(rscratch1, c_rarg0);
__ mov(c_rarg0, c_rarg1);
__ mov(c_rarg1, rscratch1);
} else {
__ mov(c_rarg1, count);
__ mov(c_rarg0, src);
}
} else {
__ mov(c_rarg0, src);
__ mov(c_rarg1, count);
}
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
__ popa();
BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}
void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
assert_different_registers(jni_env, robj, tmp);
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
// The Address offset, -784, is too large for a direct load; our immediate range is only -128 to +127.
__ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
in_bytes(JavaThread::jni_environment_offset())));
// Load address bad mask
__ add(tmp, jni_env, tmp);
__ ldr(tmp, Address(tmp));
// Check address bad mask
__ tst(robj, tmp);
__ br(Assembler::NE, slowpath);
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
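
The displacement is the distance from the JavaThread's JNIEnv field back to the bad-mask field, so it must be materialized in a register before the add. A hedged sketch of the equivalent pointer arithmetic (function name illustrative; only the difference, -784, comes from the comment above):

  #include <cstdint>

  static uintptr_t load_bad_mask(char* jni_env) {
    const long disp = -784;                    // bad_mask_offset - jni_environment_offset
    char* const p = jni_env + disp;            // mov tmp, disp; add tmp, jni_env, tmp
    return *reinterpret_cast<uintptr_t*>(p);   // ldr tmp, [tmp]
  }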
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
assert_different_registers(rheapbase, rthread, ref->as_register());
__ ldr(rheapbase, address_bad_mask_from_thread(rthread));
__ tst(ref->as_register(), rheapbase);
}
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
Register tmp = noreg;
if (stub->tmp()->is_valid()) {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = tmp = stub->tmp()->as_pointer_register();
} else {
// Address already in register
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save r0 unless it is the result or tmp register
// Set up SP to accommodate the parameters and maybe r0.
if (ref != r0 && tmp != r0) {
__ sub(sp, sp, 32);
__ str(r0, Address(sp, 16));
} else {
__ sub(sp, sp, 16);
}
// Setup arguments and call runtime stub
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ far_call(stub->runtime_stub());
// Verify result
__ verify_oop(r0, "Bad oop");
// Move result into place
if (ref != r0) {
__ mov(ref, r0);
}
// Restore r0 unless it is the result or tmp register
if (ref != r0 && tmp != r0) {
__ ldr(r0, Address(sp, 16));
__ add(sp, sp, 32);
} else {
__ add(sp, sp, 16);
}
// Stub exit
__ b(*stub->continuation());
}
#undef __
#define __ sasm->
void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
__ prologue("zgc_load_barrier stub", false);
// We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
for (int i = 0; i < 32; i += 2) {
__ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16)));
}
RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0);
__ push(saveRegs, sp);
// Setup arguments
__ load_parameter(0, c_rarg0);
__ load_parameter(1, c_rarg1);
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
__ pop(saveRegs, sp);
for (int i = 30; i >= 0; i -= 2) {
__ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16)));
}
__ epilogue();
}
#endif // COMPILER1
#undef __
#define __ cgen->assembler()->
// Generates a register specific stub for calling
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
//
// The raddr register serves as both input and output for this stub. When the stub is
// called the raddr register contains the object field address (oop*) where the bad oop
// was loaded from, which caused the slow path to be taken. On return from the stub the
// raddr register contains the good/healed oop returned from
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
// Don't generate stub for invalid registers
if (raddr == zr || raddr == r29 || raddr == r30) {
return NULL;
}
// Create stub name
char name[64];
const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
__ align(CodeEntryAlignment);
StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
address start = __ pc();
// Save live registers
RegSet savedRegs = RegSet::range(r0,r18) - RegSet::of(raddr);
__ enter();
__ push(savedRegs, sp);
// Setup arguments
if (raddr != c_rarg1) {
__ mov(c_rarg1, raddr);
}
__ ldr(c_rarg0, Address(raddr));
// Call barrier function
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Move result returned in r0 to raddr, if needed
if (raddr != r0) {
__ mov(raddr, r0);
}
__ pop(savedRegs, sp);
__ leave();
__ ret(lr);
return start;
}
#undef __
static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
const int nregs = 28; // Exclude FP, XZR, SP from calculation.
const int code_size = nregs * 254; // Rough estimate of code size
ResourceMark rm;
CodeBuffer buf(BufferBlob::create(label, code_size));
StubCodeGenerator cgen(&buf);
for (int i = 0; i < nregs; i++) {
const Register reg = as_Register(i);
stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
}
}
void ZBarrierSetAssembler::barrier_stubs_init() {
barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
}
address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
return _load_barrier_slow_stub[reg->encoding()];
}
address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
return _load_barrier_weak_slow_stub[reg->encoding()];
}
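
Each stub is specialized on the register holding the bad oop (and on strong vs. weak semantics), so compiled code can branch to it without marshalling arguments first. A hedged sketch of the table lookup the two accessors above implement (plain C++, illustrative types):

  // Illustrative stand-in for the two stub tables above; slots are
  // indexed by the register's encoding, so the stub for r5 sits at
  // index 5. Slots for registers without a generated stub stay NULL.
  typedef unsigned char* address;

  struct ZLoadBarrierStubs {
    static const int nregs = 32;
    address strong[nregs];
    address weak[nregs];

    address get(int reg_encoding, bool is_weak) const {
      return is_weak ? weak[reg_encoding] : strong[reg_encoding];
    }
  };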

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
typedef LIR_OprDesc* LIR_Opr;
class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
private:
address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
public:
ZBarrierSetAssembler();
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath);
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
virtual void barrier_stubs_init();
address load_barrier_slow_stub(Register reg);
address load_barrier_weak_slow_stub(Register reg);
};
#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP

@@ -0,0 +1,174 @@
//
// Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{
#include "gc/z/c2/zBarrierSetC2.hpp"
%}
source %{
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
Register base, int index, int scale,
int disp, bool weak) {
const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
: ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
if (index == -1) {
if (disp != 0) {
__ lea(dst, Address(base, disp));
} else {
__ mov(dst, base);
}
} else {
Register index_reg = as_Register(index);
if (disp == 0) {
__ lea(dst, Address(base, index_reg, Address::lsl(scale)));
} else {
__ lea(dst, Address(base, disp));
__ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
}
}
__ far_call(RuntimeAddress(stub));
}
%}
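
The helper materializes the field's effective address into dst before branching to the per-register stub; a hedged sketch of the three addressing cases it handles (plain C++, illustrative names):

  #include <cstdint>

  static uintptr_t effective_address(uintptr_t base, int index, int scale,
                                     int disp, const uintptr_t* regs) {
    if (index == -1) {
      return base + disp;                         // lea dst, [base, #disp] (or plain mov)
    }
    // With both a displacement and an index, the displacement is folded
    // in first, then the scaled index register is added.
    return (base + disp) + (regs[index] << scale);
  }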
//
// Execute ZGC load barrier (strong) slow path
//
instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
vRegD_V30 v30, vRegD_V31 v31) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(!n->as_LoadBarrierSlowReg()->is_weak());
effect(DEF dst, KILL cr,
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
KILL v29, KILL v30, KILL v31);
format %{"LoadBarrierSlowReg $dst, $mem" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
$mem$$index, $mem$$scale, $mem$$disp, false);
%}
ins_pipe(pipe_slow);
%}
//
// Execute ZGC load barrier (weak) slow path
//
instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
vRegD_V30 v30, vRegD_V31 v31) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(n->as_LoadBarrierSlowReg()->is_weak());
effect(DEF dst, KILL cr,
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
KILL v29, KILL v30, KILL v31);
format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
$mem$$index, $mem$$scale, $mem$$disp, true);
%}
ins_pipe(pipe_slow);
%}
// Specialized versions of compareAndExchangeP that add a keepalive that is consumed
// but doesn't affect the output.
instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
iRegP oldval, iRegP newval, iRegP keepalive,
rFlagsReg cr) %{
match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct z_compareAndSwapP(iRegINoSp res,
indirect mem,
iRegP oldval, iRegP newval, iRegP keepalive,
rFlagsReg cr) %{
match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
iRegP keepalive) %{
match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}

@@ -46,6 +46,9 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif
#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
@@ -580,6 +583,16 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ cbz(r0, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if mask is good.
// verifies that ZAddressBadMask & r0 == 0
__ ldr(c_rarg3, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
__ andr(c_rarg2, r0, c_rarg3);
__ cbnz(c_rarg2, error);
}
#endif
// Check if the oop is in the right area of memory
__ mov(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andr(c_rarg2, r0, c_rarg3);

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/debug.hpp"
void ZArguments::initialize_platform() {
// Disable class unloading - we don't support concurrent class unloading yet.
FLAG_SET_DEFAULT(ClassUnloading, false);
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
}

@@ -0,0 +1,590 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zBackingFile_linux_aarch64.hpp"
#include "gc/z/zBackingPath_linux_aarch64.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
//
// Support for building on older Linux systems
//
// System calls
#ifndef SYS_fallocate
#define SYS_fallocate 47
#endif
#ifndef SYS_memfd_create
#define SYS_memfd_create 279
#endif
// memfd_create(2) flags
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif
// open(2) flags
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
// fallocate(2) flags
#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE 0x01
#endif
#ifndef FALLOC_FL_PUNCH_HOLE
#define FALLOC_FL_PUNCH_HOLE 0x02
#endif
// Filesystem types, see statfs(2)
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
// Filesystem names
#define ZFILESYSTEM_TMPFS "tmpfs"
#define ZFILESYSTEM_HUGETLBFS "hugetlbfs"
// Sysfs file for transparent huge page on tmpfs
#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
// Java heap filename
#define ZFILENAME_HEAP "java_heap"
// Preferred tmpfs mount points, ordered by priority
static const char* z_preferred_tmpfs_mountpoints[] = {
"/dev/shm",
"/run/shm",
NULL
};
// Preferred hugetlbfs mount points, ordered by priority
static const char* z_preferred_hugetlbfs_mountpoints[] = {
"/dev/hugepages",
"/hugepages",
NULL
};
static int z_fallocate_hugetlbfs_attempts = 3;
static bool z_fallocate_supported = true;
static int z_fallocate(int fd, int mode, size_t offset, size_t length) {
return syscall(SYS_fallocate, fd, mode, offset, length);
}
static int z_memfd_create(const char *name, unsigned int flags) {
return syscall(SYS_memfd_create, name, flags);
}
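
These wrappers go through syscall(2) directly so the code still builds against old glibc headers; actual kernel support is then discovered at runtime. A hedged sketch of such a probe (illustrative; the patch itself simply tries create_mem_fd() and falls back on any failure):

  #include <errno.h>
  #include <unistd.h>

  static bool memfd_create_supported() {
    const int fd = z_memfd_create("zgc_probe", MFD_CLOEXEC);
    if (fd == -1) {
      return errno != ENOSYS;  // ENOSYS: kernel < 3.17, no memfd_create(2)
    }
    close(fd);
    return true;
  }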
ZBackingFile::ZBackingFile() :
_fd(-1),
_size(0),
_filesystem(0),
_block_size(0),
_available(0),
_initialized(false) {
// Create backing file
_fd = create_fd(ZFILENAME_HEAP);
if (_fd == -1) {
return;
}
// Get filesystem statistics
struct statfs buf;
if (fstatfs(_fd, &buf) == -1) {
ZErrno err;
log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
return;
}
_filesystem = buf.f_type;
_block_size = buf.f_bsize;
_available = buf.f_bavail * _block_size;
// Make sure we're on a supported filesystem
if (!is_tmpfs() && !is_hugetlbfs()) {
log_error(gc)("Backing file must be located on a %s or a %s filesystem",
ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
return;
}
// Make sure the filesystem type matches requested large page type
if (ZLargePages::is_transparent() && !is_tmpfs()) {
log_error(gc)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
"when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
ZFILESYSTEM_HUGETLBFS);
return;
}
const size_t expected_block_size = is_tmpfs() ? os::vm_page_size() : os::large_page_size();
if (expected_block_size != _block_size) {
log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
is_tmpfs() ? ZFILESYSTEM_TMPFS : ZFILESYSTEM_HUGETLBFS, _block_size, expected_block_size);
return;
}
// Successfully initialized
_initialized = true;
}
int ZBackingFile::create_mem_fd(const char* name) const {
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
// Create file
const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
if (fd == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create memfd file (%s)",
((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
return -1;
}
log_info(gc, init)("Heap backed by file: /memfd:%s", filename);
return fd;
}
int ZBackingFile::create_file_fd(const char* name) const {
const char* const filesystem = ZLargePages::is_explicit()
? ZFILESYSTEM_HUGETLBFS
: ZFILESYSTEM_TMPFS;
const char** const preferred_mountpoints = ZLargePages::is_explicit()
? z_preferred_hugetlbfs_mountpoints
: z_preferred_tmpfs_mountpoints;
// Find mountpoint
ZBackingPath path(filesystem, preferred_mountpoints);
if (path.get() == NULL) {
log_error(gc)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
return -1;
}
// Try to create an anonymous file using the O_TMPFILE flag. Note that this
// flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
const int fd_anon = os::open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
(err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
struct stat stat_buf;
if (fstat(fd_anon, &stat_buf) == -1) {
ZErrno err;
log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
return -1;
}
log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
return fd_anon;
}
log_debug(gc, init)("Falling back to open/unlink");
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
// Create file
const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd == -1) {
ZErrno err;
log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
return -1;
}
// Unlink file
if (unlink(filename) == -1) {
ZErrno err;
log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
return -1;
}
log_info(gc, init)("Heap backed by file: %s", filename);
return fd;
}
int ZBackingFile::create_fd(const char* name) const {
if (ZPath == NULL) {
// If the path is not explicitly specified, then we first try to create a memfd file
// instead of looking for a tmpfd/hugetlbfs mount point. Note that memfd_create() might
// not be supported at all (requires kernel >= 3.17), or it might not support large
// pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
// file on an accessible tmpfs or hugetlbfs mount point.
const int fd = create_mem_fd(name);
if (fd != -1) {
return fd;
}
log_debug(gc, init)("Falling back to searching for an accessible mount point");
}
return create_file_fd(name);
}
bool ZBackingFile::is_initialized() const {
return _initialized;
}
int ZBackingFile::fd() const {
return _fd;
}
size_t ZBackingFile::size() const {
return _size;
}
size_t ZBackingFile::available() const {
return _available;
}
bool ZBackingFile::is_tmpfs() const {
return _filesystem == TMPFS_MAGIC;
}
bool ZBackingFile::is_hugetlbfs() const {
return _filesystem == HUGETLBFS_MAGIC;
}
bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
// If the shmem_enabled file exists and is readable then we
// know the kernel supports transparent huge pages for tmpfs.
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
}
ZErrno ZBackingFile::fallocate_compat_ftruncate(size_t size) const {
while (ftruncate(_fd, size) == -1) {
if (errno != EINTR) {
// Failed
return errno;
}
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
// On hugetlbfs, mapping a file segment will fail immediately, without
// the need to touch the mapped pages first, if there aren't enough huge
// pages available to back the mapping.
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr == MAP_FAILED) {
// Failed
return errno;
}
// Once mapped, the huge pages are only reserved. We need to touch them
// to associate them with the file segment. Note that we can not punch
// hole in file segments which only have reserved pages.
if (touch) {
char* const start = (char*)addr;
char* const end = start + length;
os::pretouch_memory(start, end, _block_size);
}
// Unmap again. From now on, the huge pages that were mapped are allocated
// to this file. There's no risk in getting SIGBUS when touching them.
if (munmap(addr, length) == -1) {
// Failed
return errno;
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate_compat_pwrite(size_t offset, size_t length) const {
uint8_t data = 0;
// Allocate backing memory by writing to each block
for (size_t pos = offset; pos < offset + length; pos += _block_size) {
if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
// Failed
return errno;
}
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate_fill_hole_compat(size_t offset, size_t length) {
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
// ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
const size_t end = offset + length;
if (end > _size) {
// Increase file size
const ZErrno err = fallocate_compat_ftruncate(end);
if (err) {
// Failed
return err;
}
}
// Allocate backing memory
const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
: fallocate_compat_pwrite(offset, length);
if (err) {
if (end > _size) {
// Restore file size
fallocate_compat_ftruncate(_size);
}
// Failed
return err;
}
if (end > _size) {
// Record new file size
_size = end;
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate_fill_hole_syscall(size_t offset, size_t length) {
const int mode = 0; // Allocate
const int res = z_fallocate(_fd, mode, offset, length);
if (res == -1) {
// Failed
return errno;
}
const size_t end = offset + length;
if (end > _size) {
// Record new file size
_size = end;
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate_fill_hole(size_t offset, size_t length) {
// Using compat mode is more efficient when allocating space on hugetlbfs.
// Note that allocating huge pages this way will only reserve them, and not
// associate them with segments of the file. We must guarantee that we at
// some point touch these segments, otherwise we can not punch hole in them.
if (z_fallocate_supported && !is_hugetlbfs()) {
const ZErrno err = fallocate_fill_hole_syscall(offset, length);
if (!err) {
// Success
return 0;
}
if (err != ENOSYS && err != EOPNOTSUPP) {
// Failed
return err;
}
// Not supported
log_debug(gc)("Falling back to fallocate() compatibility mode");
z_fallocate_supported = false;
}
return fallocate_fill_hole_compat(offset, length);
}
ZErrno ZBackingFile::fallocate_punch_hole(size_t offset, size_t length) {
if (is_hugetlbfs()) {
// We can only punch hole in pages that have been touched. Non-touched
// pages are only reserved, and not associated with any specific file
// segment. We don't know which pages have been previously touched, so
// we always touch them here to guarantee that we can punch hole.
const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
if (err) {
// Failed
return err;
}
}
const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
if (z_fallocate(_fd, mode, offset, length) == -1) {
// Failed
return errno;
}
// Success
return 0;
}
ZErrno ZBackingFile::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
// Try first half
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, _block_size);
const ZErrno err0 = fallocate(punch_hole, offset0, length0);
if (err0) {
return err0;
}
// Try second half
const size_t offset1 = offset0 + length0;
const size_t length1 = length - length0;
const ZErrno err1 = fallocate(punch_hole, offset1, length1);
if (err1) {
return err1;
}
// Success
return 0;
}
ZErrno ZBackingFile::fallocate(bool punch_hole, size_t offset, size_t length) {
assert(is_aligned(offset, _block_size), "Invalid offset");
assert(is_aligned(length, _block_size), "Invalid length");
const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
if (err == EINTR && length > _block_size) {
// Calling fallocate(2) with a large length can take a long time to
// complete. When running profilers, such as VTune, this syscall will
// be constantly interrupted by signals. Expanding the file in smaller
// steps avoids this problem.
return split_and_fallocate(punch_hole, offset, length);
}
return err;
}
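
So an interrupted 512M fill is retried as two 256M halves, each of which may split again, down to the block size. A hedged sketch of the recursion (do_fallocate stands in for the punch/fill primitive; not code from the patch):

  #include <cstddef>
  #include <errno.h>

  static int do_fallocate(size_t offset, size_t length) {
    (void)offset; (void)length;
    return 0;  // stand-in for the punch/fill primitive
  }

  static int fallocate_split(size_t offset, size_t length, size_t block) {
    const int err = do_fallocate(offset, length);
    if (err != EINTR || length <= block) {
      return err;
    }
    // Retry in two smaller steps so a long-running call is not
    // endlessly interrupted by profiler signals.
    const size_t half = ((length / 2 + block - 1) / block) * block;  // align up
    const int err0 = fallocate_split(offset, half, block);
    return err0 ? err0 : fallocate_split(offset + half, length - half, block);
  }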
bool ZBackingFile::commit_inner(size_t offset, size_t length) {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
retry:
const ZErrno err = fallocate(false /* punch_hole */, offset, length);
if (err) {
if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
// If we fail to allocate during initialization, due to lack of space on
// the hugetlbfs filesystem, then we wait and retry a few times before
// giving up. Otherwise there is a risk that running JVMs back-to-back
// will fail, since there is a delay between process termination and the
// huge pages owned by that process being returned to the huge page pool
// and made available for new allocations.
log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
// Wait and retry in one second, in the hope that huge pages will be
// available by then.
sleep(1);
goto retry;
}
// Failed
log_error(gc)("Failed to commit memory (%s)", err.to_string());
return false;
}
// Success
return true;
}
size_t ZBackingFile::commit(size_t offset, size_t length) {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
return length;
}
// Failed, try to commit as much as possible
size_t start = offset;
size_t end = offset + length;
for (;;) {
length = align_down((end - start) / 2, ZGranuleSize);
if (length < ZGranuleSize) {
// Done, don't commit more
return start - offset;
}
if (commit_inner(start, length)) {
// Success, try commit more
start += length;
} else {
// Failed, try commit less
end -= length;
}
}
}
size_t ZBackingFile::uncommit(size_t offset, size_t length) {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
const ZErrno err = fallocate(true /* punch_hole */, offset, length);
if (err) {
log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
return 0;
}
return length;
}

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGFILE_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGFILE_LINUX_AARCH64_HPP
#include "memory/allocation.hpp"
class ZErrno;
class ZBackingFile {
private:
int _fd;
size_t _size;
uint64_t _filesystem;
size_t _block_size;
size_t _available;
bool _initialized;
int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
int create_fd(const char* name) const;
bool is_tmpfs() const;
bool is_hugetlbfs() const;
bool tmpfs_supports_transparent_huge_pages() const;
ZErrno fallocate_compat_ftruncate(size_t size) const;
ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
ZErrno fallocate_fill_hole(size_t offset, size_t length);
ZErrno fallocate_punch_hole(size_t offset, size_t length);
ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
bool commit_inner(size_t offset, size_t length);
public:
ZBackingFile();
bool is_initialized() const;
int fd() const;
size_t size() const;
size_t available() const;
size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
};
#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGFILE_LINUX_AARCH64_HPP

@@ -0,0 +1,149 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "zBackingPath_linux_aarch64.hpp"
#include "gc/z/zErrno.hpp"
#include "logging/log.hpp"
#include <stdio.h>
#include <unistd.h>
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
ZBackingPath::ZBackingPath(const char* filesystem, const char** preferred_mountpoints) {
if (ZPath != NULL) {
// Use specified path
_path = strdup(ZPath);
} else {
// Find suitable path
_path = find_mountpoint(filesystem, preferred_mountpoints);
}
}
ZBackingPath::~ZBackingPath() {
free(_path);
_path = NULL;
}
char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
char* line_mountpoint = NULL;
char* line_filesystem = NULL;
// Parse line and return a newly allocated string containing the mount point if
// the line contains a matching filesystem and the mount point is accessible by
// the current user.
if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
strcmp(line_filesystem, filesystem) != 0 ||
access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
// Not a matching or accessible filesystem
free(line_mountpoint);
line_mountpoint = NULL;
}
free(line_filesystem);
return line_mountpoint;
}
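
The format string skips the mount ID, parent ID, and major:minor fields, captures the mount point, skips the optional fields up to the '-' separator, then captures the filesystem type. A hedged, self-contained demonstration against the sample line from proc(5) (prints "/mnt2 ext3"):

  #include <stdio.h>
  #include <stdlib.h>

  int main() {
    const char* line =
        "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue";
    char* mountpoint = NULL;
    char* filesystem = NULL;
    // Same format string as get_mountpoint() above; %ms allocates the match.
    if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms",
               &mountpoint, &filesystem) == 2) {
      printf("%s %s\n", mountpoint, filesystem);
    }
    free(mountpoint);
    free(filesystem);
    return 0;
  }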
void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const {
FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == NULL) {
ZErrno err;
log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
return;
}
char* line = NULL;
size_t length = 0;
while (getline(&line, &length, fd) != -1) {
char* const mountpoint = get_mountpoint(line, filesystem);
if (mountpoint != NULL) {
mountpoints->add(mountpoint);
}
}
free(line);
fclose(fd);
}
void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
ZArrayIterator<char*> iter(mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
free(mountpoint);
}
mountpoints->clear();
}
char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
ZArray<char*>* mountpoints,
const char** preferred_mountpoints) const {
// Find preferred mount point
ZArrayIterator<char*> iter1(mountpoints);
for (char* mountpoint; iter1.next(&mountpoint);) {
for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) {
if (!strcmp(mountpoint, *preferred)) {
// Preferred mount point found
return strdup(mountpoint);
}
}
}
// Preferred mount point not found
log_error(gc)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error(gc)(" %s", mountpoint);
}
return NULL;
}
char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
char* path = NULL;
ZArray<char*> mountpoints;
get_mountpoints(filesystem, &mountpoints);
if (mountpoints.size() == 0) {
// No mount point found
log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.size() == 1) {
// One mount point found
path = strdup(mountpoints.at(0));
} else {
// More than one mount point found
path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
}
free_mountpoints(&mountpoints);
return path;
}
const char* ZBackingPath::get() const {
return _path;
}

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGPATH_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGPATH_LINUX_AARCH64_HPP
#include "gc/z/zArray.hpp"
#include "memory/allocation.hpp"
class ZBackingPath : public StackObj {
private:
char* _path;
char* get_mountpoint(const char* line,
const char* filesystem) const;
void get_mountpoints(const char* filesystem,
ZArray<char*>* mountpoints) const;
void free_mountpoints(ZArray<char*>* mountpoints) const;
char* find_preferred_mountpoint(const char* filesystem,
ZArray<char*>* mountpoints,
const char** preferred_mountpoints) const;
char* find_mountpoint(const char* filesystem,
const char** preferred_mountpoints) const;
public:
ZBackingPath(const char* filesystem, const char** preferred_mountpoints);
~ZBackingPath();
const char* get() const;
};
#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZBACKINGPATH_LINUX_AARCH64_HPP

@@ -0,0 +1,173 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 6 5 2 1 0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// | | |
// | | * 41-0 Object Offset (42-bits, 4TB address space)
// | |
// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | 0010 = Marked1 (Address view 8-12TB)
// | 0100 = Remapped (Address view 16-20TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | Marked0 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 7 6 3 2 0
// +-------------------+----+------------------------------------------------+
// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
// +-------------------+----+------------------------------------------------+
// | | |
// | | * 42-0 Object Offset (43-bits, 8TB address space)
// | |
// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
// | 0010 = Marked1 (Address view 16-24TB)
// | 0100 = Remapped (Address view 32-40TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | Marked0 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 8 7 4 3 0
// +------------------+----+-------------------------------------------------+
// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
// +------------------+----+-------------------------------------------------+
// | | |
// | | * 43-0 Object Offset (44-bits, 16TB address space)
// | |
// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
// | 0010 = Marked1 (Address view 32-48TB)
// | 0100 = Remapped (Address view 64-80TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-48 Fixed (16-bits, always zero)
//
uintptr_t ZPlatformAddressSpaceStart() {
const uintptr_t first_heap_view_address = (uintptr_t)1 << (ZPlatformAddressMetadataShift() + 0);
const size_t min_address_offset = 0;
return first_heap_view_address + min_address_offset;
}
uintptr_t ZPlatformAddressSpaceEnd() {
const uintptr_t last_heap_view_address = (uintptr_t)1 << (ZPlatformAddressMetadataShift() + 2);
const size_t max_address_offset = (size_t)1 << ZPlatformAddressOffsetBits();
return last_heap_view_address + max_address_offset;
}
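// Example (illustrative): with the minimum 42-bit offset (Layout 1 above),
// the space starts at 1 << 42 = 4TB (base of the Marked0 view) and ends at
// (1 << 44) + (1 << 42) = 20TB (top of the Remapped view).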
uintptr_t ZPlatformAddressReservedStart() {
return ZPlatformAddressSpaceStart();
}
uintptr_t ZPlatformAddressReservedEnd() {
return ZPlatformAddressSpaceEnd();
}
uintptr_t ZPlatformAddressBase() {
return 0;
}
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
const size_t virtual_to_physical_ratio = 7; // 7:1
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
const size_t address_offset_bits = log2_intptr(address_offset);
return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
}
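// Example (illustrative): a 2TB max heap yields
// round_up_power_of_2(2TB * 7) = 16TB, i.e. 44 offset bits (Layout 3);
// a 128M heap computes 30 bits and is clamped up to the 42-bit minimum
// (Layout 1).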
size_t ZPlatformAddressMetadataShift() {
return ZPlatformAddressOffsetBits();
}

@ -0,0 +1,51 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP
//
// Page Allocation Tiers
// ---------------------
//
// Page Type Page Size Object Size Limit Object Alignment
// ------------------------------------------------------------------
//  Small         2M            <= 256K               <MinObjAlignmentInBytes>
// Medium 32M <= 4M 4K
// Large X*M > 4M 2M
// ------------------------------------------------------------------
//
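// For example (illustrative), a 300K object exceeds the small-page limit and
// is allocated in a 32M medium page at 4K alignment, while an 8M object gets
// a large page of its own, sized up to a multiple of the 2M granule.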
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
uintptr_t ZPlatformAddressSpaceStart();
uintptr_t ZPlatformAddressSpaceEnd();
uintptr_t ZPlatformAddressReservedStart();
uintptr_t ZPlatformAddressReservedEnd();
uintptr_t ZPlatformAddressBase();
size_t ZPlatformAddressOffsetBits();
size_t ZPlatformAddressMetadataShift();
#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZGLOBALS_LINUX_AARCH64_HPP

@ -0,0 +1,333 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBackingFile_linux_aarch64.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_linux_aarch64.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
//
// Support for building on older Linux systems
//
// madvise(2) flags
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
// Proc file entry for max map count
#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
bool ZPhysicalMemoryBacking::is_initialized() const {
return _file.is_initialized();
}
void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
// Note that the available space on a tmpfs or a hugetlbfs filesystem
// will be zero if no size limit was specified when it was mounted.
const size_t available = _file.available();
if (available == 0) {
// No size limit set, skip check
log_info(gc, init)("Available space on backing filesystem: N/A");
return;
}
log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", available / M);
// Warn if the filesystem doesn't currently have enough space available to hold
// the max heap size. The max heap size will be capped if we later hit this limit
// when trying to expand the heap.
if (available < max) {
log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
"(available", max / M);
log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
"size could", available / M);
log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
}
}
void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
// Failed to open file, skip check
log_debug(gc, init)("Failed to open %s", filename);
return;
}
size_t actual_max_map_count = 0;
const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
fclose(file);
if (result != 1) {
// Failed to read file, skip check
log_debug(gc, init)("Failed to read %s", filename);
return;
}
// The required max map count is impossible to calculate exactly since subsystems
// other than ZGC are also creating memory mappings, and we have no control over that.
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst case, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
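// For example (illustrative): a 16G max heap with 2M granules requires
// (16G / 2M) * 3 * 1.2 = 29491 mappings, well below the common
// /proc/sys/vm/max_map_count default of 65530.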
if (actual_max_map_count < required_max_map_count) {
log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given");
log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
max / M, filename);
log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
"with the current", required_max_map_count, actual_max_map_count);
log_warning(gc)("limit could lead to a fatal error, due to failure to map memory.");
}
}
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
// Warn if available space is too low
warn_available_space(max);
// Warn if max map count is too low
warn_max_map_count(max);
}
bool ZPhysicalMemoryBacking::supports_uncommit() {
assert(!is_init_completed(), "Invalid state");
assert(_file.size() >= ZGranuleSize, "Invalid size");
// Test if uncommit is supported by uncommitting and then re-committing a granule
return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}
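// Note that uncommit() returns the number of bytes actually punched and
// commit() the number actually re-committed, so support is only reported
// when a full granule survives the round trip.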
size_t ZPhysicalMemoryBacking::commit(size_t size) {
size_t committed = 0;
// Fill holes in the backing file
while (committed < size) {
size_t allocated = 0;
const size_t remaining = size - committed;
const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
if (start == UINTPTR_MAX) {
// No holes to commit
break;
}
// Try commit hole
const size_t filled = _file.commit(start, allocated);
if (filled > 0) {
// Successful or partially successful
_committed.free(start, filled);
committed += filled;
}
if (filled < allocated) {
// Failed or partially failed
_uncommitted.free(start + filled, allocated - filled);
return committed;
}
}
// Expand backing file
if (committed < size) {
const size_t remaining = size - committed;
const uintptr_t start = _file.size();
const size_t expanded = _file.commit(start, remaining);
if (expanded > 0) {
// Successful or partially successful
_committed.free(start, expanded);
committed += expanded;
}
}
return committed;
}
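// Illustrative walk-through (assumed state): if a 4M backing file has an
// uncommitted hole at [0, 2M), commit(4M) first re-commits that hole and
// then expands the file by the remaining 2M at its current end.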
size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
size_t uncommitted = 0;
// Punch holes in backing file
while (uncommitted < size) {
size_t allocated = 0;
const size_t remaining = size - uncommitted;
const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
assert(start != UINTPTR_MAX, "Allocation should never fail");
// Try punch hole
const size_t punched = _file.uncommit(start, allocated);
if (punched > 0) {
// Successful or partially successful
_uncommitted.free(start, punched);
uncommitted += punched;
}
if (punched < allocated) {
// Failed or partially failed
_committed.free(start + punched, allocated - punched);
return uncommitted;
}
}
return uncommitted;
}
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
assert(is_aligned(size, ZGranuleSize), "Invalid size");
ZPhysicalMemory pmem;
// Allocate segments
for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
assert(start != UINTPTR_MAX, "Allocation should never fail");
pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
}
return pmem;
}
void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
const size_t nsegments = pmem.nsegments();
// Free segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_committed.free(segment.start(), segment.size());
}
}
void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
if (err == ENOMEM) {
fatal("Failed to map memory. Please check the system limit on number of "
"memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
} else {
fatal("Failed to map memory (%s)", err.to_string());
}
}
void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size, int advice) const {
if (madvise((void*)addr, size, advice) == -1) {
ZErrno err;
log_error(gc)("Failed to advise on memory (advice %d, %s)", advice, err.to_string());
}
}
void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
const size_t nsegments = pmem.nsegments();
size_t size = 0;
// Map segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
const uintptr_t segment_addr = addr + size;
const void* const res = mmap((void*)segment_addr, segment.size(), PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
size += segment.size();
}
// Advise on use of transparent huge pages before touching it
if (ZLargePages::is_transparent()) {
advise_view(addr, size, MADV_HUGEPAGE);
}
// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);
// Pre-touch memory
if (pretouch) {
pretouch_view(addr, size);
}
}
void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const void* const res = mmap((void*)addr, pmem.size(), PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
}
uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
// From an NMT point of view we treat the first heap view (marked0) as committed
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
}
}
void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Unmap good view
unmap_view(pmem, ZAddress::good(offset));
} else {
// Unmap all views
unmap_view(pmem, ZAddress::marked0(offset));
unmap_view(pmem, ZAddress::marked1(offset));
unmap_view(pmem, ZAddress::remapped(offset));
}
}
void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset), false /* pretouch */);
}
void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
unmap_view(pmem, ZAddress::good(offset));
}

@ -0,0 +1,70 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP
#include "gc/z/zBackingFile_linux_aarch64.hpp"
#include "gc/z/zMemory.hpp"
class ZErrno;
class ZPhysicalMemory;
class ZPhysicalMemoryBacking {
private:
ZBackingFile _file;
ZMemoryManager _committed;
ZMemoryManager _uncommitted;
void warn_available_space(size_t max) const;
void warn_max_map_count(size_t max) const;
void map_failed(ZErrno err) const;
void advise_view(uintptr_t addr, size_t size, int advice) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
public:
bool is_initialized() const;
void warn_commit_limits(size_t max) const;
bool supports_uncommit();
size_t commit(size_t size);
size_t uncommit(size_t size);
ZPhysicalMemory alloc(size_t size);
void free(const ZPhysicalMemory& pmem);
uintptr_t nmt_address(uintptr_t offset) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
};
#endif // OS_CPU_LINUX_AARCH64_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_AARCH64_HPP
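// Illustrative call order (a sketch of expected use, not part of this file):
// warn_commit_limits() at initialization, commit() to back physical memory,
// alloc() to carve out granule segments, and map() to install the heap views;
// unmap(), free() and uncommit() reverse the sequence.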