8204210: Implementation: JEP 333: ZGC: A Scalable Low-Latency Garbage Collector (Experimental)

Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Erik Osterlund <erik.osterlund@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Kim Barrett <kim.barrett@oracle.com>
Co-authored-by: Nils Eliasson <nils.eliasson@oracle.com>
Co-authored-by: Rickard Backman <rickard.backman@oracle.com>
Co-authored-by: Roland Westrelin <rwestrel@redhat.com>
Co-authored-by: Coleen Phillimore <coleen.phillimore@oracle.com>
Co-authored-by: Robbin Ehn <robbin.ehn@oracle.com>
Co-authored-by: Gerard Ziemski <gerard.ziemski@oracle.com>
Co-authored-by: Hugh Wilkinson <hugh.wilkinson@intel.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Co-authored-by: Bill Wheeler <bill.npo.wheeler@intel.com>
Co-authored-by: Vinay K. Awasthi <vinay.k.awasthi@intel.com>
Co-authored-by: Yasumasa Suenaga <yasuenag@gmail.com>
Reviewed-by: pliden, stefank, eosterlund, ehelin, sjohanss, rbackman, coleenp, ihse, jgeorge, lmesnik, rkennke
Per Lidén 2018-06-12 17:40:28 +02:00
parent 312328b44c
commit e7af7a4aef
322 changed files with 27196 additions and 79 deletions

@@ -25,7 +25,7 @@
# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc epsilongc nmt cds \
graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
static-build link-time-opt aot jfr"
# Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
fi
# Only enable ZGC on Linux x86_64
AC_MSG_CHECKING([if zgc should be built])
if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
AC_MSG_RESULT([yes])
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
AC_MSG_RESULT([no, platform not supported])
fi
else
AC_MSG_RESULT([no])
fi
# Turn on additional features based on other parts of configure
if test "x$INCLUDE_DTRACE" = "xtrue"; then
JVM_FEATURES="$JVM_FEATURES dtrace"

@@ -694,6 +694,14 @@ var getJibProfilesProfiles = function (input, common, data) {
profiles[openName].artifacts["jdk"].remote));
});
// Enable ZGC in linux-x64-open builds
[ "linux-x64-open" ].forEach(function (name) {
var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
var debugName = name + common.debug_suffix;
profiles[name] = concatObjects(profiles[name], configureArgs);
profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
});
// Profiles used to run tests. Used in JPRT and Mach 5.
var testOnlyProfiles = {
"run-test-jprt": {

@@ -160,6 +160,11 @@ ifneq ($(call check-jvm-feature, epsilongc), true)
JVM_EXCLUDE_PATTERNS += gc/epsilon
endif
ifneq ($(call check-jvm-feature, zgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
JVM_EXCLUDE_PATTERNS += gc/z
endif
ifneq ($(call check-jvm-feature, jfr), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
JVM_EXCLUDE_PATTERNS += jfr

@@ -1346,7 +1346,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
#endif
__ verify_oop(dest->as_register());
// Load barrier has not yet been applied, so ZGC can't verify the oop here
if (!UseZGC) {
__ verify_oop(dest->as_register());
}
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedClassPointers) {
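Why C1 must skip verify_oop here: until the load barrier has run, a freshly loaded ZGC reference may still carry metadata ("color") bits and is not yet a dereferenceable oop. A minimal illustration of that invariant, with the bit placement assumed purely for this sketch (the commit does not define it in this hunk):

#include <cstdint>

// Assumed placement, for illustration only: a few high address bits act
// as ZGC metadata. A loaded-but-unhealed pointer may have them set.
constexpr uintptr_t kAssumedMetadataBits = uintptr_t(0xF) << 42;

// A raw loaded value with metadata bits set is not yet a valid oop, so
// running verify_oop() on it before the barrier would spuriously fail.
inline bool safe_to_verify(uintptr_t raw_loaded_value) {
  return (raw_loaded_value & kAssumedMetadataBits) == 0;
}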

@@ -0,0 +1,458 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#undef __
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
static void call_vm(MacroAssembler* masm,
address entry_point,
Register arg0,
Register arg1) {
// Setup arguments
if (arg1 == c_rarg0) {
if (arg0 == c_rarg1) {
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, arg1);
__ movptr(c_rarg0, arg0);
}
} else {
if (arg0 != c_rarg0) {
__ movptr(c_rarg0, arg0);
}
if (arg1 != c_rarg1) {
__ movptr(c_rarg1, arg1);
}
}
// Call VM
__ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
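The argument shuffle in call_vm guards against register aliasing: arg1 may already live in c_rarg0 (and arg0 in c_rarg1), so writing c_rarg0 first could destroy arg1 before it is copied. The same decision tree as a standalone sketch, with references standing in for registers (illustrative, not HotSpot code):

#include <utility>

// r0/r1 stand for the fixed argument registers, arg0/arg1 for the inputs.
void move_into_fixed_slots(long& r0, long& r1, long& arg0, long& arg1) {
  if (&arg1 == &r0) {
    if (&arg0 == &r1) {
      std::swap(r0, r1);              // fully crossed: swap, like xchgptr above
    } else {
      r1 = arg1;                      // move arg1 out of r0 first...
      r0 = arg0;                      // ...then r0 is free to receive arg0
    }
  } else {
    if (&arg0 != &r0) { r0 = arg0; }  // no aliasing hazard here:
    if (&arg1 != &r1) { r1 = arg1; }  // either order works
  }
}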
void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread) {
if (!ZBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
// Allocate scratch register
Register scratch = tmp1;
if (tmp1 == noreg) {
scratch = r12;
__ push(scratch);
}
assert_different_registers(dst, scratch);
Label done;
//
// Fast Path
//
// Load address
__ lea(scratch, src);
// Load oop at address
__ movptr(dst, Address(scratch, 0));
// Test address bad mask
__ testptr(dst, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
//
// Slow path
//
// Save registers
__ push(rax);
__ push(rcx);
__ push(rdx);
__ push(rdi);
__ push(rsi);
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
// We may end up here from generate_native_wrapper, in which case the method
// may have floats as arguments, and we must spill them before calling the VM
// runtime leaf. From the interpreter, all floats are passed on the stack.
assert(Argument::n_float_register_parameters_j == 8, "Assumption");
const int xmm_size = wordSize * 2;
const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
__ subptr(rsp, xmm_spill_size);
__ movdqu(Address(rsp, xmm_size * 7), xmm7);
__ movdqu(Address(rsp, xmm_size * 6), xmm6);
__ movdqu(Address(rsp, xmm_size * 5), xmm5);
__ movdqu(Address(rsp, xmm_size * 4), xmm4);
__ movdqu(Address(rsp, xmm_size * 3), xmm3);
__ movdqu(Address(rsp, xmm_size * 2), xmm2);
__ movdqu(Address(rsp, xmm_size * 1), xmm1);
__ movdqu(Address(rsp, xmm_size * 0), xmm0);
// Call VM
call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
// Restore registers
__ movdqu(xmm0, Address(rsp, xmm_size * 0));
__ movdqu(xmm1, Address(rsp, xmm_size * 1));
__ movdqu(xmm2, Address(rsp, xmm_size * 2));
__ movdqu(xmm3, Address(rsp, xmm_size * 3));
__ movdqu(xmm4, Address(rsp, xmm_size * 4));
__ movdqu(xmm5, Address(rsp, xmm_size * 5));
__ movdqu(xmm6, Address(rsp, xmm_size * 6));
__ movdqu(xmm7, Address(rsp, xmm_size * 7));
__ addptr(rsp, xmm_spill_size);
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
__ pop(rsi);
__ pop(rdi);
__ pop(rdx);
__ pop(rcx);
if (dst == rax) {
__ addptr(rsp, wordSize);
} else {
__ movptr(dst, rax);
__ pop(rax);
}
__ bind(done);
// Restore scratch register
if (tmp1 == noreg) {
__ pop(scratch);
}
BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}
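The fast path above is the core of the ZGC load barrier: load the reference, test it against a thread-local "address bad mask", and only on a hit enter the register-saving slow path that heals the field. A conceptual sketch of that control flow, with names assumed for illustration:

#include <cstdint>

// Assumed stand-ins: the per-thread bad mask, and the runtime slow path
// (ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded in the commit).
extern uintptr_t thread_address_bad_mask;
uintptr_t load_barrier_slow_path(uintptr_t raw_oop, uintptr_t* field);

inline uintptr_t z_load_oop(uintptr_t* field) {
  uintptr_t raw = *field;                        // movptr dst, [addr]
  if ((raw & thread_address_bad_mask) != 0) {    // testptr + jcc above
    raw = load_barrier_slow_path(raw, field);    // heals *field, returns good oop
  }
  return raw;
}

The common case is a single test-and-branch, which is what makes it affordable to run the barrier on every reference load.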
#ifdef ASSERT
void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2) {
BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
// Verify oop store
if (type == T_OBJECT || type == T_ARRAY) {
// Note that src could be noreg, which means we
// are storing null and can skip verification.
if (src != noreg) {
Label done;
__ testptr(src, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}
#endif // ASSERT
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count) {
if (!ZBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
return;
}
BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
// Save registers
__ pusha();
// Call VM
call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
// Restore registers
__ popa();
BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}
void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
// Test address bad mask
__ testptr(obj, address_bad_mask_from_jni_env(jni_env));
__ jcc(Assembler::notZero, slowpath);
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
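Note the asymmetry with load_at: the native variant reads the bad mask from the JNI environment rather than from r15_thread, and on a hit it does not heal anything; it simply branches to the wrapper's slow path, which leaves native state before doing barrier work. The resulting contract, sketched with an assumed name:

#include <cstdint>

// Assumed per-JNIEnv copy of the bad mask (address_bad_mask_from_jni_env).
extern uintptr_t jni_env_address_bad_mask;

// True if the resolved oop may be used directly from native code; false
// means "take the slowpath label" -- no healing happens on this path.
inline bool resolved_oop_is_good(uintptr_t resolved_oop) {
  return (resolved_oop & jni_env_address_bad_mask) == 0;
}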
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
__ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
if (stub->ref_addr()->is_register()) {
// Address already in register
ref_addr = stub->ref_addr()->as_pointer_register();
} else {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
ref_addr = stub->tmp()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save rax unless it is the result register
if (ref != rax) {
__ push(rax);
}
// Setup arguments and call runtime stub
__ subptr(rsp, 2 * BytesPerWord);
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ call(RuntimeAddress(stub->runtime_stub()));
__ addptr(rsp, 2 * BytesPerWord);
// Verify result
__ verify_oop(rax, "Bad oop");
// Restore rax unless it is the result register
if (ref != rax) {
__ movptr(ref, rax);
__ pop(rax);
}
// Stub exit
__ jmp(*stub->continuation());
}
#undef __
#define __ sasm->
void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
// Enter and save registers
__ enter();
__ save_live_registers_no_oop_map(true /* save_fpu_registers */);
// Setup arguments
__ load_parameter(1, c_rarg1);
__ load_parameter(0, c_rarg0);
// Call VM
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Restore registers and return
__ restore_live_registers_except_rax(true /* restore_fpu_registers */);
__ leave();
__ ret(0);
}
#endif // COMPILER1
#undef __
#define __ cgen->assembler()->
// Generates a register-specific stub for calling
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
//
// The raddr register serves as both input and output for this stub. When the stub is
// called, the raddr register contains the object field address (oop*) from which the
// bad oop was loaded, which is what caused the slow path to be taken. On return from
// the stub, the raddr register contains the good/healed oop returned from
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
// Don't generate stub for invalid registers
if (raddr == rsp || raddr == r12 || raddr == r15) {
return NULL;
}
// Create stub name
char name[64];
const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
__ align(CodeEntryAlignment);
StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
address start = __ pc();
// Save live registers
if (raddr != rax) {
__ push(rax);
}
if (raddr != rcx) {
__ push(rcx);
}
if (raddr != rdx) {
__ push(rdx);
}
if (raddr != rsi) {
__ push(rsi);
}
if (raddr != rdi) {
__ push(rdi);
}
if (raddr != r8) {
__ push(r8);
}
if (raddr != r9) {
__ push(r9);
}
if (raddr != r10) {
__ push(r10);
}
if (raddr != r11) {
__ push(r11);
}
// Setup arguments
if (c_rarg1 != raddr) {
__ movq(c_rarg1, raddr);
}
__ movq(c_rarg0, Address(raddr, 0));
// Call barrier function
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Move result returned in rax to raddr, if needed
if (raddr != rax) {
__ movq(raddr, rax);
}
// Restore saved registers
if (raddr != r11) {
__ pop(r11);
}
if (raddr != r10) {
__ pop(r10);
}
if (raddr != r9) {
__ pop(r9);
}
if (raddr != r8) {
__ pop(r8);
}
if (raddr != rdi) {
__ pop(rdi);
}
if (raddr != rsi) {
__ pop(rsi);
}
if (raddr != rdx) {
__ pop(rdx);
}
if (raddr != rcx) {
__ pop(rcx);
}
if (raddr != rax) {
__ pop(rax);
}
__ ret(0);
return start;
}
#undef __
void ZBarrierSetAssembler::barrier_stubs_init() {
// Load barrier stubs
int stub_code_size = 256 * 16; // Rough estimate of code size
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
Register rr = as_Register(0);
for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
_load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
_load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
rr = rr->successor();
}
}

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
typedef LIR_OprDesc* LIR_Opr;
class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
public:
ZBarrierSetAssembler() :
_load_barrier_slow_stub(),
_load_barrier_weak_slow_stub() {}
address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath);
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
ZLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
virtual void barrier_stubs_init();
};
#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP

@@ -44,6 +44,9 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if metadata bits indicate a bad oop
__ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, error);
}
#endif
// Check if the oop is in the right area of memory
__ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());

@@ -1067,6 +1067,138 @@ reg_class vectorz_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM
#endif
);
reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
#ifdef _LP64
reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
#endif
%}

@@ -538,6 +538,12 @@ reg_class int_rdi_reg(RDI);
%}
source_hpp %{
#if INCLUDE_ZGC
#include "gc/z/zBarrierSetAssembler.hpp"
#endif
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@ operand cmpOpUCF2() %{
%}
%}
// Operands for bound floating pointer register arguments
operand rxmm0() %{
constraint(ALLOC_IN_RC(xmm0_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm1() %{
constraint(ALLOC_IN_RC(xmm1_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm2() %{
constraint(ALLOC_IN_RC(xmm2_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm3() %{
constraint(ALLOC_IN_RC(xmm3_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm4() %{
constraint(ALLOC_IN_RC(xmm4_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm5() %{
constraint(ALLOC_IN_RC(xmm5_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm6() %{
constraint(ALLOC_IN_RC(xmm6_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm7() %{
constraint(ALLOC_IN_RC(xmm7_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm8() %{
constraint(ALLOC_IN_RC(xmm8_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm9() %{
constraint(ALLOC_IN_RC(xmm9_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm10() %{
constraint(ALLOC_IN_RC(xmm10_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm11() %{
constraint(ALLOC_IN_RC(xmm11_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm12() %{
constraint(ALLOC_IN_RC(xmm12_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm13() %{
constraint(ALLOC_IN_RC(xmm13_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm14() %{
constraint(ALLOC_IN_RC(xmm14_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm15() %{
constraint(ALLOC_IN_RC(xmm15_reg)); match(VecX);
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
%}
operand rxmm16() %{
constraint(ALLOC_IN_RC(xmm16_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm17() %{
constraint(ALLOC_IN_RC(xmm17_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm18() %{
constraint(ALLOC_IN_RC(xmm18_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm19() %{
constraint(ALLOC_IN_RC(xmm19_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm20() %{
constraint(ALLOC_IN_RC(xmm20_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm21() %{
constraint(ALLOC_IN_RC(xmm21_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm22() %{
constraint(ALLOC_IN_RC(xmm22_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm23() %{
constraint(ALLOC_IN_RC(xmm23_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm24() %{
constraint(ALLOC_IN_RC(xmm24_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm25() %{
constraint(ALLOC_IN_RC(xmm25_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm26() %{
constraint(ALLOC_IN_RC(xmm26_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm27() %{
constraint(ALLOC_IN_RC(xmm27_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm28() %{
constraint(ALLOC_IN_RC(xmm28_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm29() %{
constraint(ALLOC_IN_RC(xmm29_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm30() %{
constraint(ALLOC_IN_RC(xmm30_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
operand rxmm31() %{
constraint(ALLOC_IN_RC(xmm31_reg)); match(VecX);
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
%}
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
@@ -11547,6 +11682,16 @@ instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
ins_pipe(ialu_cr_reg_mem);
%}
instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
%{
match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
format %{ "testq $src, $mem" %}
opcode(0x85);
ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
ins_pipe(ialu_cr_reg_mem);
%}
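The new testL_reg_mem2 pattern lets C2 fold the ZGC barrier test into a single instruction: a pointer cast to an integer, ANDed with a long loaded from memory (the bad mask in thread-local storage), compared against zero. The matched expression shape, written out as C++ for illustration:

#include <cstdint>

// Shape matched by: CmpL (AndL (CastP2X src) (LoadL mem)) zero.
// C2 can emit one `testq src, mem` for this instead of load + and + cmp.
inline bool bad_mask_hit(void* oop, const int64_t* bad_mask_in_memory) {
  return (static_cast<int64_t>(reinterpret_cast<intptr_t>(oop)) & *bad_mask_in_memory) != 0;
}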
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@@ -12320,6 +12465,223 @@ instruct RethrowException()
ins_pipe(pipe_jmp);
%}
//
// Execute ZGC load barrier (strong) slow path
//
// When running without XMM regs
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15,
KILL x16, KILL x17, KILL x18, KILL x19,
KILL x20, KILL x21, KILL x22, KILL x23,
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
//
// Execute ZGC load barrier (weak) slow path
//
// When running without XMM regs
instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15,
KILL x16, KILL x17, KILL x18, KILL x19,
KILL x20, KILL x21, KILL x22, KILL x23,
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
ins_encode %{
#if INCLUDE_ZGC
Register d = $dst$$Register;
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
assert(d != r12, "Can't be R12!");
assert(d != r15, "Can't be R15!");
assert(d != rsp, "Can't be RSP!");
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
#else
ShouldNotReachHere();
#endif
%}
ins_pipe(pipe_slow);
%}
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
inline uintptr_t ZAddress::address(uintptr_t value) {
return value;
}
#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP

@@ -0,0 +1,360 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zBackingPath_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <unistd.h>
// Filesystem names
#define ZFILESYSTEM_TMPFS "tmpfs"
#define ZFILESYSTEM_HUGETLBFS "hugetlbfs"
// Sysfs file for transparent huge page on tmpfs
#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
// Default mount points
#define ZMOUNTPOINT_TMPFS "/dev/shm"
#define ZMOUNTPOINT_HUGETLBFS "/hugepages"
// Java heap filename
#define ZFILENAME_HEAP "java_heap"
// Support for building on older Linux systems
#ifndef __NR_memfd_create
#define __NR_memfd_create 319
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
// Filesystem types, see statfs(2)
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
static int z_memfd_create(const char *name, unsigned int flags) {
return syscall(__NR_memfd_create, name, flags);
}
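z_memfd_create goes through syscall(2) directly because, at the time of the commit, libc wrappers for memfd_create() were not yet widely available (glibc only added one in 2.27). A hedged standalone sketch of the same call:

#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_memfd_create
#define __NR_memfd_create 319  // x86_64 syscall number, as in the commit
#endif

int main() {
  // Create an anonymous memory-backed file; returns -1 with errno set
  // (e.g. ENOSYS on kernels older than 3.17, which lack the syscall).
  const long fd = syscall(__NR_memfd_create, "example", 0x0001U /* MFD_CLOEXEC */);
  return fd == -1 ? 1 : 0;
}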
ZBackingFile::ZBackingFile() :
_fd(-1),
_filesystem(0),
_initialized(false) {
// Create backing file
_fd = create_fd(ZFILENAME_HEAP);
if (_fd == -1) {
return;
}
// Get filesystem type
struct statfs statfs_buf;
if (fstatfs(_fd, &statfs_buf) == -1) {
ZErrno err;
log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
return;
}
_filesystem = statfs_buf.f_type;
// Make sure we're on a supported filesystem
if (!is_tmpfs() && !is_hugetlbfs()) {
log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
return;
}
// Make sure the filesystem type matches requested large page type
if (ZLargePages::is_transparent() && !is_tmpfs()) {
log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
// Successfully initialized
_initialized = true;
}
int ZBackingFile::create_mem_fd(const char* name) const {
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
// Create file
const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
if (fd == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create memfd file (%s)",
((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
return -1;
}
log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
return fd;
}
int ZBackingFile::create_file_fd(const char* name) const {
const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
// Find mountpoint
ZBackingPath path(filesystem, mountpoint);
if (path.get() == NULL) {
log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
return -1;
}
// Try to create an anonymous file using the O_TMPFILE flag. Note that this
// flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
ZErrno err;
log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
(err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
struct stat stat_buf;
if (fstat(fd_anon, &stat_buf) == -1) {
ZErrno err;
log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
return -1;
}
log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
return fd_anon;
}
log_debug(gc, init)("Falling back to open/unlink");
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
// Create file
const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd == -1) {
ZErrno err;
log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
return -1;
}
// Unlink file
if (unlink(filename) == -1) {
ZErrno err;
log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
return -1;
}
log_debug(gc, init)("Heap backed by file %s", filename);
return fd;
}
int ZBackingFile::create_fd(const char* name) const {
if (ZPath == NULL) {
// If the path is not explicitly specified, then we first try to create a memfd file
// instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
// not be supported at all (requires kernel >= 3.17), or it might not support large
// pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
// file on an accessible tmpfs or hugetlbfs mount point.
const int fd = create_mem_fd(name);
if (fd != -1) {
return fd;
}
log_debug(gc, init)("Falling back to searching for an accessible moint point");
}
return create_file_fd(name);
}
bool ZBackingFile::is_initialized() const {
return _initialized;
}
int ZBackingFile::fd() const {
return _fd;
}
bool ZBackingFile::is_tmpfs() const {
return _filesystem == TMPFS_MAGIC;
}
bool ZBackingFile::is_hugetlbfs() const {
return _filesystem == HUGETLBFS_MAGIC;
}
bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
// If the shmem_enabled file exists and is readable then we
// know the kernel supports transparent huge pages for tmpfs.
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
}
bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
// Try the first smaller part.
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, alignment);
if (!try_expand_tmpfs(offset0, length0, alignment)) {
return false;
}
// Try the second smaller part.
const size_t offset1 = offset0 + length0;
const size_t length1 = length - length0;
if (!try_expand_tmpfs(offset1, length1, alignment)) {
return false;
}
return true;
}
bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
assert(length > 0, "Invalid length");
assert(is_aligned(length, alignment), "Invalid length");
ZErrno err = posix_fallocate(_fd, offset, length);
if (err == EINTR && length > alignment) {
// Calling posix_fallocate() with a large length can take a long
// time to complete. When running profilers, such as VTune, this
// syscall will be constantly interrupted by signals. Expanding
// the file in smaller steps avoids this problem.
return try_split_and_expand_tmpfs(offset, length, alignment);
}
if (err) {
log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
return false;
}
return true;
}
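The EINTR handling above deserves a note: posix_fallocate() over a large range can be interrupted so often (for example under a sampling profiler) that it never completes, so the code retries in two aligned halves, recursively. The same strategy as a standalone sketch (plain POSIX, names assumed):

#include <cerrno>
#include <cstddef>
#include <fcntl.h>

static size_t align_up_to(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// Expand [offset, offset+length) of fd; on EINTR split the range into two
// aligned halves and retry each, so no single syscall runs long enough to
// be starved by signals. posix_fallocate() returns the error code directly.
static bool fallocate_in_steps(int fd, size_t offset, size_t length, size_t alignment) {
  const int err = posix_fallocate(fd, (off_t)offset, (off_t)length);
  if (err == EINTR && length > alignment) {
    const size_t first = align_up_to(length / 2, alignment);
    return fallocate_in_steps(fd, offset, first, alignment) &&
           fallocate_in_steps(fd, offset + first, length - first, alignment);
  }
  return err == 0;
}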
bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
assert(is_tmpfs(), "Wrong filesystem");
return try_expand_tmpfs(offset, length, os::vm_page_size());
}
bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
assert(is_hugetlbfs(), "Wrong filesystem");
// Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
// Instead of posix_fallocate() we can use a well-known workaround,
// which involves truncating the file to the requested size and then trying
// to map it to verify that there are enough huge pages available to
// back it.
while (ftruncate(_fd, offset + length) == -1) {
ZErrno err;
if (err != EINTR) {
log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
return false;
}
}
// If we fail mapping during initialization, i.e. when we are pre-mapping
// the heap, then we wait and retry a few times before giving up. Otherwise
// there is a risk that running JVMs back-to-back will fail, since there
// is a delay between process termination and the huge pages owned by that
// process being returned to the huge page pool and made available for new
// allocations.
void* addr = MAP_FAILED;
const int max_attempts = 3;
for (int attempt = 1; attempt <= max_attempts; attempt++) {
addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr != MAP_FAILED || is_init_completed()) {
// Mapping was successful or initialization phase has completed
break;
}
ZErrno err;
log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
err.to_string(), attempt, max_attempts);
// Wait and retry in one second, in the hope that
// huge pages will be available by then.
sleep(1);
}
if (addr == MAP_FAILED) {
// Not enough huge pages left
ZErrno err;
log_error(gc)("Failed to map backing file (%s)", err.to_string());
return false;
}
// Successful mapping, unmap again. From now on the pages we mapped
// will be reserved for this file.
if (munmap(addr, length) == -1) {
ZErrno err;
log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
return false;
}
return true;
}
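The mmap in expand_hugetlbfs doubles as an availability probe: ftruncate() on hugetlbfs only sets the file size, while the mapping is what actually claims huge pages, so mapping and immediately unmapping verifies that the pool can back the range. The probe loop reduced to its essentials (POSIX sketch, simplified from the code above):

#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

// Probe that [offset, offset+length) of a hugetlbfs fd can be backed by
// huge pages, retrying while pages from a recently terminated process
// drain back into the huge page pool.
static bool probe_huge_pages(int fd, size_t offset, size_t length, int max_attempts) {
  for (int attempt = 1; attempt <= max_attempts; attempt++) {
    void* const addr = mmap(0, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)offset);
    if (addr != MAP_FAILED) {
      return munmap(addr, length) == 0;  // pages are now reserved for the file
    }
    sleep(1);                            // wait for the pool to refill
  }
  return false;
}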
bool ZBackingFile::expand(size_t offset, size_t length) const {
return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
}

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
#include "memory/allocation.hpp"
class ZBackingFile {
private:
int _fd;
uint64_t _filesystem;
bool _initialized;
int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
int create_fd(const char* name) const;
bool is_tmpfs() const;
bool is_hugetlbfs() const;
bool tmpfs_supports_transparent_huge_pages() const;
bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool expand_tmpfs(size_t offset, size_t length) const;
bool expand_hugetlbfs(size_t offset, size_t length) const;
public:
ZBackingFile();
bool is_initialized() const;
int fd() const;
bool expand(size_t offset, size_t length) const;
};
#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP

@@ -0,0 +1,141 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zBackingPath_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "logging/log.hpp"
#include <stdio.h>
#include <unistd.h>
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
if (ZPath != NULL) {
// Use specified path
_path = strdup(ZPath);
} else {
// Find suitable path
_path = find_mountpoint(filesystem, preferred_path);
}
}
ZBackingPath::~ZBackingPath() {
free(_path);
_path = NULL;
}
char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
char* line_mountpoint = NULL;
char* line_filesystem = NULL;
// Parse line and return a newly allocated string containing the mountpoint if
// the line contains a matching filesystem and the mountpoint is accessible by
// the current user.
if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
strcmp(line_filesystem, filesystem) != 0 ||
access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
// Not a matching or accessible filesystem
free(line_mountpoint);
line_mountpoint = NULL;
}
free(line_filesystem);
return line_mountpoint;
}
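The sscanf format in get_mountpoint is dense: per proc(5), field 5 of a mountinfo line is the mount point, a variable number of optional fields follow, and a lone "-" separates them from the filesystem type; the glibc "m" modifier makes sscanf malloc() each matched string. A self-contained check against a representative line (the sample line itself is illustrative):

#include <cstdio>
#include <cstdlib>

int main() {
  // Representative /proc/self/mountinfo line for a tmpfs mount.
  const char* line =
      "36 25 0:31 / /dev/shm rw,nosuid,nodev shared:17 - tmpfs tmpfs rw";
  char* mountpoint = NULL;
  char* filesystem = NULL;
  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms",
             &mountpoint, &filesystem) == 2) {
    printf("mountpoint=%s filesystem=%s\n", mountpoint, filesystem);
    free(mountpoint);   // %ms allocations are owned by the caller
    free(filesystem);
  }
  return 0;
}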
void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == NULL) {
ZErrno err;
log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
return;
}
char* line = NULL;
size_t length = 0;
while (getline(&line, &length, fd) != -1) {
char* const mountpoint = get_mountpoint(line, filesystem);
if (mountpoint != NULL) {
mountpoints->add(mountpoint);
}
}
free(line);
fclose(fd);
}
void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
ZArrayIterator<char*> iter(mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
free(mountpoint);
}
mountpoints->clear();
}
char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
char* path = NULL;
ZArray<char*> mountpoints;
get_mountpoints(&mountpoints, filesystem);
if (mountpoints.size() == 0) {
// No filesystem found
log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.size() == 1) {
// One filesystem found
path = strdup(mountpoints.at(0));
} else if (mountpoints.size() > 1) {
// More than one filesystem found
ZArrayIterator<char*> iter(&mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
if (!strcmp(mountpoint, preferred_mountpoint)) {
// Preferred mount point found
path = strdup(mountpoint);
break;
}
}
if (path == NULL) {
// Preferred mount point not found
log_error(gc, init)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(&mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error(gc, init)(" %s", mountpoint);
}
}
}
free_mountpoints(&mountpoints);
return path;
}
const char* ZBackingPath::get() const {
return _path;

@ -0,0 +1,46 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
#include "gc/z/zArray.hpp"
#include "memory/allocation.hpp"
class ZBackingPath : public StackObj {
private:
char* _path;
char* get_mountpoint(const char* line, const char* filesystem) const;
void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
void free_mountpoints(ZArray<char*>* mountpoints) const;
char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
public:
ZBackingPath(const char* filesystem, const char* preferred_path);
~ZBackingPath();
const char* get() const;
};
#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP

@ -0,0 +1,33 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
uintptr_t ZAddressReservedStart() {
return ZAddressMetadataMarked0;
}
uintptr_t ZAddressReservedEnd() {
return ZAddressMetadataRemapped + ZAddressOffsetMax;
}
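// Note: combined with the x86_64 constants in zGlobals_linux_x86.hpp below,
// this reserves from the start of the Marked0 view (4TB) to the end of the
// Remapped view (16TB + 4TB of offset space = 20TB)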

@ -0,0 +1,88 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
//
// Page Allocation Tiers
// ---------------------
//
//  Page Type     Page Size     Object Size Limit     Object Alignment
//  ------------------------------------------------------------------
//  Small         2M            <= 256K               <MinObjAlignmentInBytes>
//  Medium        32M           <= 4M                 4K
//  Large         X*M           > 4M                  2M
//  ------------------------------------------------------------------
//
//
// Address Space & Pointer Layout
// ------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// .                                .
// .                                .
// .                                .
// +--------------------------------+ 0x0000140000000000 (20TB)
// |         Remapped View          |
// +--------------------------------+ 0x0000100000000000 (16TB)
// |     (Reserved, but unused)     |
// +--------------------------------+ 0x00000c0000000000 (12TB)
// |          Marked1 View          |
// +--------------------------------+ 0x0000080000000000 (8TB)
// |          Marked0 View          |
// +--------------------------------+ 0x0000040000000000 (4TB)
// .                                .
// +--------------------------------+ 0x0000000000000000
//
//
//  6                 4 4 4  4 4
//  3                 7 6 5  2 1                                             0
// +-------------------+-+----+-----------------------------------------------+
// |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
// +-------------------+-+----+-----------------------------------------------+
// |                   | |    |
// |                   | |    * 41-0 Object Offset (42-bits, 4TB address space)
// |                   | |
// |                   | * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
// |                   |                                 0010 = Marked1      (Address view 8-12TB)
// |                   |                                 0100 = Remapped     (Address view 16-20TB)
// |                   |                                 1000 = Finalizable  (Address view N/A)
// |                   |
// |                   * 46-46 Unused (1-bit, always zero)
// |
// * 63-47 Fixed (17-bits, always zero)
//
const size_t ZPlatformPageSizeSmallShift = 21; // 2M
const size_t ZPlatformAddressOffsetBits = 42; // 4TB
const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceStart = (uintptr_t)1 << ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceSize = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
const size_t ZPlatformCacheLineSize = 64;
#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
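To make the layout above concrete, a hedged sketch (constant names and the sample offset are mine; bit positions are from the diagram above) showing the same heap offset viewed through each metadata bit:

#include <cstdint>
#include <cstdio>

int main() {
  const int offset_bits = 42;
  const uintptr_t marked0  = (uintptr_t)1 << offset_bits;        // 0001 -> 4TB view
  const uintptr_t marked1  = (uintptr_t)1 << (offset_bits + 1);  // 0010 -> 8TB view
  const uintptr_t remapped = (uintptr_t)1 << (offset_bits + 2);  // 0100 -> 16TB view
  const uintptr_t offset   = 0x123456;                           // hypothetical heap offset
  printf("marked0:  0x%lx\n", (unsigned long)(offset | marked0));   // 0x40000123456
  printf("marked1:  0x%lx\n", (unsigned long)(offset | marked1));   // 0x80000123456
  printf("remapped: 0x%lx\n", (unsigned long)(offset | remapped));  // 0x100000123456
  return 0;
}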

@ -0,0 +1,38 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zLargePages.hpp"
#include "runtime/globals.hpp"
void ZLargePages::initialize_platform() {
if (UseLargePages) {
if (UseTransparentHugePages) {
_state = Transparent;
} else {
_state = Explicit;
}
} else {
_state = Disabled;
}
}

@ -0,0 +1,83 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "gc/z/zErrno.hpp"
#include "gc/z/zCPU.hpp"
#include "gc/z/zNUMA.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include <unistd.h>
#include <sys/syscall.h>
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#endif
static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
}
void ZNUMA::initialize_platform() {
_enabled = UseNUMA;
}
uint32_t ZNUMA::count() {
if (!_enabled) {
// NUMA support not enabled
return 1;
}
return os::Linux::numa_max_node() + 1;
}
uint32_t ZNUMA::id() {
if (!_enabled) {
// NUMA support not enabled
return 0;
}
return os::Linux::get_node_by_cpu(ZCPU::id());
}
uint32_t ZNUMA::memory_id(uintptr_t addr) {
if (!_enabled) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
uint32_t id = (uint32_t)-1;
if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
ZErrno err;
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
}
assert(id < count(), "Invalid NUMA id");
return id;
}
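A standalone sketch (assumes Linux with glibc; not committed code) of the same MPOL_F_NODE | MPOL_F_ADDR query that memory_id() performs, applied to a single malloc'ed page:

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0)
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1)
#endif

int main() {
  char* const page = (char*)malloc(4096);
  *page = 1; // touch the page so it is backed by physical memory
  int node = -1;
  if (syscall(__NR_get_mempolicy, &node, NULL, 0, page, MPOL_F_NODE | MPOL_F_ADDR) == 0) {
    printf("page %p is on NUMA node %d\n", (void*)page, node);
  }
  free(page);
  return 0;
}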

@ -0,0 +1,237 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
#include "logging/log.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
// Support for building on older Linux systems
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
// Proc file entry for max map count
#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
_manager(),
_file(),
_granule_size(granule_size) {
// Check and warn if max map count seems too low
check_max_map_count(max_capacity, granule_size);
}
void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
// Failed to open file, skip check
log_debug(gc)("Failed to open %s", filename);
return;
}
size_t actual_max_map_count = 0;
const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
fclose(file);
if (result != 1) {
// Failed to read file, skip check
log_debug(gc)("Failed to read %s", filename);
return;
}
// The required max map count is impossible to calculate exactly since subsystems
// other than ZGC are also creating memory mappings, and we have no control over that.
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning(gc)("The system limit on number of memory mappings "
"per process might be too low for the given");
log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
"adjust %s to allow for at least", max_capacity / M, filename);
log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
"Continuing execution with the current limit could",
required_max_map_count, actual_max_map_count);
log_warning(gc)("lead to a fatal error down the line, due to failed "
"attempts to map memory.");
}
}
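// Worked example (illustrative numbers, not from the commit): a 128G max
// heap with 2M granules requires (128G / 2M) * 3 * 1.2 = 65536 * 3.6, about
// 235930 mappings, well above the common vm.max_map_count default of 65530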
bool ZPhysicalMemoryBacking::is_initialized() const {
return _file.is_initialized();
}
bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
const size_t size = to - from;
// Expand
if (!_file.expand(from, size)) {
return false;
}
// Add expanded space to free list
_manager.free(from, size);
return true;
}
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
assert(is_aligned(size, _granule_size), "Invalid size");
ZPhysicalMemory pmem;
// Allocate segments
for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
const uintptr_t start = _manager.alloc_from_front(_granule_size);
assert(start != UINTPTR_MAX, "Allocation should never fail");
pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
}
return pmem;
}
void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
const size_t nsegments = pmem.nsegments();
// Free segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment segment = pmem.segment(i);
_manager.free(segment.start(), segment.size());
}
}
void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
if (err == ENOMEM) {
fatal("Failed to map memory. Please check the system limit on number of "
"memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
} else {
fatal("Failed to map memory (%s)", err.to_string());
}
}
void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
ZErrno err;
log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
}
}
void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
const size_t nsegments = pmem.nsegments();
// Map segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment segment = pmem.segment(i);
const size_t size = segment.size();
const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
// Advise on use of transparent huge pages before touching it
if (ZLargePages::is_transparent()) {
advise_view(addr, size);
}
// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);
if (pretouch) {
pretouch_view(addr, size);
}
addr += size;
}
}
void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const size_t size = pmem.size();
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}
}
uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
// From an NMT point of view we treat the first heap mapping (marked0) as committed
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only map the good view, for debugging only
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
}
}
void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only unmap the good view, for debugging only
unmap_view(pmem, ZAddress::good(offset));
} else {
// Unmap all views
unmap_view(pmem, ZAddress::marked0(offset));
unmap_view(pmem, ZAddress::marked1(offset));
unmap_view(pmem, ZAddress::remapped(offset));
}
}
void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
assert(ZUnmapBadViews, "Should be enabled");
const uintptr_t addr_good = ZAddress::good(offset);
const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
// Map/Unmap views
map_view(pmem, addr_good, false /* pretouch */);
unmap_view(pmem, addr_bad);
}
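A self-contained sketch (my naming; same flags as unmap_view() above) of the overmapping trick: the backing memory is dropped while the address range stays reserved:

#include <sys/mman.h>
#include <cstdio>

// Replace the mapping at addr with an inaccessible anonymous mapping,
// detaching the backing memory without giving up the reservation
static bool detach_backing(void* addr, size_t size) {
  void* const res = mmap(addr, size, PROT_NONE,
                         MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
  return res != MAP_FAILED;
}

int main() {
  const size_t size = 1024 * 1024;
  void* const addr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  if (addr != MAP_FAILED && detach_backing(addr, size)) {
    printf("backing detached, reservation kept at %p\n", addr);
  }
  return 0;
}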

@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zMemory.hpp"
class ZErrno;
class ZPhysicalMemory;
class ZPhysicalMemoryBacking {
private:
ZMemoryManager _manager;
ZBackingFile _file;
const size_t _granule_size;
void check_max_map_count(size_t max_capacity, size_t granule_size) const;
void map_failed(ZErrno err) const;
void advise_view(uintptr_t addr, size_t size) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
public:
ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
bool is_initialized() const;
bool expand(size_t from, size_t to);
ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);
uintptr_t nmt_address(uintptr_t offset) const;
void map(ZPhysicalMemory pmem, uintptr_t offset) const;
void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
};
#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP

@ -0,0 +1,41 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "logging/log.hpp"
#include <sys/mman.h>
#include <sys/types.h>
bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
// Reserve address space
const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
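// Without MAP_FIXED the requested start address is only a hint, so compare
// the returned address against the hint; this also covers MAP_FAILED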
if (actual_start != start) {
log_error(gc)("Failed to reserve address space for Java heap");
return false;
}
return true;
}

@ -2282,6 +2282,9 @@ bool OperandForm::is_bound_register() const {
if (strcmp(name, "RegD") == 0) size = 2;
if (strcmp(name, "RegL") == 0) size = 2;
if (strcmp(name, "RegN") == 0) size = 1;
if (strcmp(name, "VecX") == 0) size = 4;
if (strcmp(name, "VecY") == 0) size = 8;
if (strcmp(name, "VecZ") == 0) size = 16;
if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
if (size == 0) {
return false;
@ -3509,6 +3512,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
if( strcmp(_opType,"PrefetchAllocation")==0 )

@ -756,6 +756,9 @@ bool vmIntrinsics::is_disabled_by_flags(vmIntrinsics::ID id) {
#endif // COMPILER1
#ifdef COMPILER2
case vmIntrinsics::_clone:
#if INCLUDE_ZGC
if (UseZGC) return true;
#endif
case vmIntrinsics::_copyOf:
case vmIntrinsics::_copyOfRange:
// These intrinsics use both the objectcopy and the arraycopy

@ -66,7 +66,8 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
#else
#define compilerdirectives_c2_flags(cflags)
#endif

@ -380,8 +380,12 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
continue;
}
#ifdef ASSERT
if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
!Universe::heap()->is_in_or_null(*loc)) {
// We cannot verify the oop here if we are using ZGC; the oop
// will be bad in case there was a safepoint between the load
// and the load barrier.
if (!UseZGC &&
((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
!Universe::heap()->is_in_or_null(*loc))) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information
trace_codeblob_maps(fr, reg_map);

@ -31,7 +31,8 @@
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
f(CardTableBarrierSet) \
EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
G1GC_ONLY(f(G1BarrierSet))
G1GC_ONLY(f(G1BarrierSet)) \
ZGC_ONLY(f(ZBarrierSet))
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
f(ModRef)

@ -31,10 +31,13 @@
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilonBarrierSet.hpp" // Epsilon support
#include "gc/epsilon/epsilonBarrierSet.hpp"
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
#include "gc/g1/g1BarrierSet.inline.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zBarrierSet.inline.hpp"
#endif
#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP

@ -89,6 +89,7 @@ class GCHeapLog : public EventLogBase<GCMessage> {
// CMSHeap
// G1CollectedHeap
// ParallelScavengeHeap
// ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
friend class VMStructs;
@ -207,7 +208,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
Parallel,
CMS,
G1,
Epsilon
Epsilon,
Z
};
static inline size_t filler_array_max_size() {

@ -105,6 +105,21 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _dcmd_gc_run:
return "Diagnostic Command";
case _z_timer:
return "Timer";
case _z_warmup:
return "Warmup";
case _z_allocation_rate:
return "Allocation Rate";
case _z_allocation_stall:
return "Allocation Stall";
case _z_proactive:
return "Proactive";
case _last_gc_cause:
return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";

@ -78,6 +78,12 @@ class GCCause : public AllStatic {
_dcmd_gc_run,
_z_timer,
_z_warmup,
_z_allocation_rate,
_z_allocation_stall,
_z_proactive,
_last_gc_cause
};

@ -43,6 +43,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serialArguments.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zArguments.hpp"
#endif
struct SupportedGC {
bool& _flag;
@ -59,6 +62,7 @@ struct SupportedGC {
G1GC_ONLY(static G1Arguments g1Arguments;)
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
SERIALGC_ONLY(static SerialArguments serialArguments;)
ZGC_ONLY(static ZArguments zArguments;)
// Table of supported GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.
@ -69,6 +73,7 @@ static const SupportedGC SupportedGCs[] = {
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc"))
ZGC_ONLY_ARG(SupportedGC(UseZGC, CollectedHeap::Z, zArguments, "z gc"))
};
#define FOR_EACH_SUPPORTED_GC(var) \
@ -98,6 +103,7 @@ void GCConfig::select_gc_ergonomically() {
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
NOT_SERIALGC( UNSUPPORTED_OPTION(UseSerialGC);)
NOT_ZGC( UNSUPPORTED_OPTION(UseZGC);)
}
bool GCConfig::is_no_gc_selected() {

@ -43,6 +43,10 @@ GCName GCConfiguration::young_collector() const {
return ParNew;
}
if (UseZGC) {
return NA;
}
return DefNew;
}
@ -59,6 +63,10 @@ GCName GCConfiguration::old_collector() const {
return ParallelOld;
}
if (UseZGC) {
return Z;
}
return SerialOld;
}

@ -38,6 +38,8 @@ enum GCName {
ConcurrentMarkSweep,
G1Old,
G1Full,
Z,
NA,
GCNameEndSentinel
};
@ -55,6 +57,8 @@ class GCNameHelper {
case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
case G1Old: return "G1Old";
case G1Full: return "G1Full";
case Z: return "Z";
case NA: return "N/A";
default: ShouldNotReachHere(); return NULL;
}
}

@ -40,6 +40,6 @@
// should consider placing frequently accessed fields first in
// T, so that field offsets relative to Thread are small, which
// often allows for a more compact instruction encoding.
typedef uint64_t GCThreadLocalData[14]; // 112 bytes
typedef uint64_t GCThreadLocalData[18]; // 144 bytes
#endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP

@ -41,6 +41,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_globals.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/z_globals.hpp"
#endif
#define GC_FLAGS(develop, \
develop_pd, \
@ -137,6 +140,22 @@
constraint, \
writeable)) \
\
ZGC_ONLY(GC_Z_FLAGS( \
develop, \
develop_pd, \
product, \
product_pd, \
diagnostic, \
diagnostic_pd, \
experimental, \
notproduct, \
manageable, \
product_rw, \
lp64_product, \
range, \
constraint, \
writeable)) \
\
/* gc */ \
\
product(bool, UseConcMarkSweepGC, false, \
@ -157,6 +176,9 @@
experimental(bool, UseEpsilonGC, false, \
"Use the Epsilon (no-op) garbage collector") \
\
experimental(bool, UseZGC, false, \
"Use the Z garbage collector") \
\
product(uint, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \

@ -35,6 +35,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_specialized_oop_closures.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/z_specialized_oop_closures.hpp"
#endif
// The following OopClosure types get specialized versions of
// "oop_oop_iterate" that invoke the closures' do_oop methods
@ -67,7 +70,8 @@ class OopsInGenClosure;
SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)) \
CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)) \
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)) \
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f)) \
ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
// We separate these out, because sometime the general one has
// a different definition from the specialized ones, and sometimes it

@ -50,6 +50,9 @@
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/vmStructs_serial.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/vmStructs_z.hpp"
#endif
#define VM_STRUCTS_GC(nonstatic_field, \
volatile_nonstatic_field, \
@ -70,6 +73,10 @@
SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
\
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
/**********************************************************************************/ \
@ -171,6 +178,10 @@
SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
ZGC_ONLY(VM_TYPES_ZGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
\
/******************************************/ \
/* Generation and space hierarchies */ \
/* (needed for run-time type information) */ \
@ -242,6 +253,8 @@
declare_constant_with_value)) \
SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant, \
declare_constant_with_value)) \
ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant, \
declare_constant_with_value)) \
\
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
@ -285,5 +298,7 @@
declare_constant(Generation::LogOfGenGrain) \
declare_constant(Generation::GenGrain) \
#define VM_LONG_CONSTANTS_GC(declare_constant) \
ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
#endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP

@ -0,0 +1,247 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "utilities/macros.hpp"
ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
_decorators(access.decorators()),
_ref_addr(access.resolved_addr()),
_ref(ref),
_tmp(LIR_OprFact::illegalOpr),
_patch_info(access.patch_emit_info()),
_runtime_stub(runtime_stub) {
// Allocate tmp register if needed
if (!_ref_addr->is_register()) {
assert(_ref_addr->is_address(), "Must be an address");
if (_ref_addr->as_address_ptr()->index()->is_valid() ||
_ref_addr->as_address_ptr()->disp() != 0) {
// Has index or displacement, need tmp register to load address into
_tmp = access.gen()->new_pointer_register();
} else {
// No index or displacement, address available in base register
_ref_addr = _ref_addr->as_address_ptr()->base();
}
}
assert(_ref->is_register(), "Must be a register");
assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
}
DecoratorSet ZLoadBarrierStubC1::decorators() const {
return _decorators;
}
LIR_Opr ZLoadBarrierStubC1::ref() const {
return _ref;
}
LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
return _ref_addr;
}
LIR_Opr ZLoadBarrierStubC1::tmp() const {
return _tmp;
}
LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
}
CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
return _patch_info;
}
address ZLoadBarrierStubC1::runtime_stub() const {
return _runtime_stub;
}
void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
if (_patch_info != NULL) {
visitor->do_slow_case(_patch_info);
} else {
visitor->do_slow_case();
}
visitor->do_input(_ref_addr);
visitor->do_output(_ref);
if (_tmp->is_valid()) {
visitor->do_temp(_tmp);
}
}
void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
}
#ifndef PRODUCT
void ZLoadBarrierStubC1::print_name(outputStream* out) const {
out->print("ZLoadBarrierStubC1");
}
#endif // PRODUCT
class LIR_OpZLoadBarrierTest : public LIR_Op {
private:
LIR_Opr _opr;
public:
LIR_OpZLoadBarrierTest(LIR_Opr opr) :
LIR_Op(),
_opr(opr) {}
virtual void visit(LIR_OpVisitState* state) {
state->do_input(_opr);
}
virtual void emit_code(LIR_Assembler* ce) {
ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
}
virtual void print_instr(outputStream* out) const {
_opr->print(out);
out->print(" ");
}
#ifndef PRODUCT
virtual const char* name() const {
return "lir_z_load_barrier_test";
}
#endif // PRODUCT
};
static bool barrier_needed(LIRAccess& access) {
return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}
ZBarrierSetC1::ZBarrierSetC1() :
_load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
_load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
if ((decorators & ON_WEAK_OOP_REF) != 0) {
return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
} else {
return _load_barrier_on_oop_field_preloaded_runtime_stub;
}
}
#ifdef ASSERT
#define __ access.gen()->lir(__FILE__, __LINE__)->
#else
#define __ access.gen()->lir()->
#endif
void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
// Fast path
__ append(new LIR_OpZLoadBarrierTest(result));
// Slow path
const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
__ branch(lir_cond_notEqual, T_ADDRESS, stub);
__ branch_destination(stub->continuation());
}
#undef __
void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
BarrierSetC1::load_at_resolved(access, result);
if (barrier_needed(access)) {
load_barrier(access, result);
}
}
static void pre_load_barrier(LIRAccess& access) {
DecoratorSet decorators = access.decorators();
// Downgrade access to MO_UNORDERED
decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
// Remove C1_WRITE_ACCESS
decorators = (decorators & ~C1_WRITE_ACCESS);
// Generate synthetic load at
access.gen()->access_load_at(decorators,
access.type(),
access.base().item(),
access.offset().opr(),
access.gen()->new_register(access.type()),
NULL /* patch_emit_info */,
NULL /* load_emit_info */);
}
LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_xchg_at_resolved(access, value);
}
LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}
class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
private:
const DecoratorSet _decorators;
public:
ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
_decorators(decorators) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
return NULL;
}
};
static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
return code_blob->code_begin();
}
void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
_load_barrier_on_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
_load_barrier_on_weak_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
}
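As a conceptual aid, a plain C++ sketch (my naming; the real fast path is emitted as LIR by load_barrier() above) of the control flow the C1 barrier produces: test the loaded oop against the bad mask and take the runtime stub only when a bad bit is set:

#include <cstdint>

static uintptr_t ZAddressBadMask = 0;  // in the VM this is flipped at GC phase changes

static uintptr_t slow_path(uintptr_t oop, uintptr_t* field) {
  // Stand-in for the runtime stub: mark/relocate as needed and self-heal *field
  return oop;
}

static uintptr_t load_oop_with_barrier(uintptr_t* field) {
  uintptr_t oop = *field;               // the plain load from load_at_resolved()
  if ((oop & ZAddressBadMask) != 0) {   // LIR_OpZLoadBarrierTest
    oop = slow_path(oop, field);        // ZLoadBarrierStubC1 -> runtime stub
  }
  return oop;                           // a good (healed) oop
}

int main() {
  uintptr_t field = 0;
  return (int)load_oop_with_barrier(&field);
}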

@ -0,0 +1,80 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_LIR.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/accessDecorators.hpp"
class ZLoadBarrierStubC1 : public CodeStub {
private:
DecoratorSet _decorators;
LIR_Opr _ref_addr;
LIR_Opr _ref;
LIR_Opr _tmp;
CodeEmitInfo* _patch_info;
address _runtime_stub;
public:
ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
DecoratorSet decorators() const;
LIR_Opr ref() const;
LIR_Opr ref_addr() const;
LIR_Opr tmp() const;
LIR_PatchCode patch_code() const;
CodeEmitInfo*& patch_info();
address runtime_stub() const;
virtual void emit_code(LIR_Assembler* ce);
virtual void visit(LIR_OpVisitState* visitor);
#ifndef PRODUCT
virtual void print_name(outputStream* out) const;
#endif // PRODUCT
};
class ZBarrierSetC1 : public BarrierSetC1 {
private:
address _load_barrier_on_oop_field_preloaded_runtime_stub;
address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
void load_barrier(LIRAccess& access, LIR_Opr result) const;
protected:
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
public:
ZBarrierSetC1();
virtual void generate_c1_runtime_stubs(BufferBlob* blob);
};
#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP

File diff suppressed because it is too large

@ -0,0 +1,206 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.hpp"
#include "opto/node.hpp"
#include "utilities/growableArray.hpp"
class LoadBarrierNode : public MultiNode {
private:
bool _weak;
bool _writeback; // Controls if the barrier writes the healed oop back to memory
// A swap on a memory location must never write back the healed oop
bool _oop_reload_allowed; // Controls if the barrier is allowed to reload the oop from memory
// before healing; otherwise both the oop and the address must be
// passed to the barrier
static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
void push_dominated_barriers(PhaseIterGVN* igvn) const;
public:
enum {
Control,
Memory,
Oop,
Address,
Number_of_Outputs = Address,
Similar,
Number_of_Inputs
};
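// Note: Number_of_Outputs reuses Address's ordinal, so only Control, Memory
// and Oop are projected as outputs; Address and Similar are inputs only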
LoadBarrierNode(Compile* C,
Node* c,
Node* mem,
Node* val,
Node* adr,
bool weak,
bool writeback,
bool oop_reload_allowed);
virtual int Opcode() const;
virtual const Type *bottom_type() const;
virtual const Type *Value(PhaseGVN *phase) const;
virtual Node *Identity(PhaseGVN *phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
bool linear_only,
bool look_for_similar);
void fix_similar_in_uses(PhaseIterGVN* igvn);
bool has_true_uses() const;
bool can_be_eliminated() const {
return !in(Similar)->is_top();
}
bool is_weak() const {
return _weak;
}
bool is_writeback() const {
return _writeback;
}
bool oop_reload_allowed() const {
return _oop_reload_allowed;
}
};
class LoadBarrierSlowRegNode : public LoadPNode {
public:
LoadBarrierSlowRegNode(Node *c,
Node *mem,
Node *adr,
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual const char * name() {
return "LoadBarrierSlowRegNode";
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
}
virtual int Opcode() const;
};
class LoadBarrierWeakSlowRegNode : public LoadPNode {
public:
LoadBarrierWeakSlowRegNode(Node *c,
Node *mem,
Node *adr,
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual const char * name() {
return "LoadBarrierWeakSlowRegNode";
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
}
virtual int Opcode() const;
};
class ZBarrierSetC2State : public ResourceObj {
private:
// List of load barrier nodes which need to be expanded before matching
GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
public:
ZBarrierSetC2State(Arena* comp_arena);
int load_barrier_count() const;
void add_load_barrier_node(LoadBarrierNode* n);
void remove_load_barrier_node(LoadBarrierNode* n);
LoadBarrierNode* load_barrier_node(int idx) const;
};
class ZBarrierSetC2 : public BarrierSetC2 {
private:
ZBarrierSetC2State* state() const;
Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
const TypeFunc* load_barrier_Type() const;
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
Node* expected_val,
Node* new_val,
const Type* val_type) const;
virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
Node* expected_val,
Node* new_val,
const Type* value_type) const;
virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
Node* new_val,
const Type* val_type) const;
public:
Node* load_barrier(GraphKit* kit,
Node* val,
Node* adr,
bool weak = false,
bool writeback = true,
bool oop_reload_allowed = true) const;
virtual void* create_barrier_state(Arena* comp_arena) const;
virtual bool is_gc_barrier_node(Node* node) const;
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const;
virtual void register_potential_barrier_node(Node* node) const;
virtual void unregister_potential_barrier_node(Node* node) const;
virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
virtual Node* step_over_gc_barrier(Node* c) const { return c; }
// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
static void find_dominating_barriers(PhaseIterGVN& igvn);
static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
#ifdef ASSERT
virtual void verify_gc_barriers(bool post_parse) const;
#endif
};
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/z/vmStructs_z.hpp"
ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
_ZGlobalPhase(&ZGlobalPhase),
_ZAddressGoodMask(&ZAddressGoodMask),
_ZAddressBadMask(&ZAddressBadMask),
_ZAddressWeakBadMask(&ZAddressWeakBadMask),
_ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
_ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
}
ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;

@ -0,0 +1,121 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
#define SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
#include "gc/z/zAddressRangeMap.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPhysicalMemory.hpp"
#include "utilities/macros.hpp"
// Expose some ZGC globals to the SA agent.
class ZGlobalsForVMStructs {
static ZGlobalsForVMStructs _instance;
public:
static ZGlobalsForVMStructs* _instance_p;
ZGlobalsForVMStructs();
uint32_t* _ZGlobalPhase;
uintptr_t* _ZAddressGoodMask;
uintptr_t* _ZAddressBadMask;
uintptr_t* _ZAddressWeakBadMask;
const int* _ZObjectAlignmentSmallShift;
const int* _ZObjectAlignmentSmall;
};
typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \
static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
nonstatic_field(ZGlobalsForVMStructs, _ZGlobalPhase, uint32_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressGoodMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressBadMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZAddressWeakBadMask, uintptr_t*) \
nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmallShift, const int*) \
nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmall, const int*) \
\
nonstatic_field(ZCollectedHeap, _heap, ZHeap) \
\
nonstatic_field(ZHeap, _page_allocator, ZPageAllocator) \
nonstatic_field(ZHeap, _pagetable, ZPageTable) \
\
nonstatic_field(ZPage, _type, const uint8_t) \
nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
nonstatic_field(ZPage, _forwarding, ZForwardingTable) \
\
nonstatic_field(ZPageAllocator, _physical, ZPhysicalMemoryManager) \
nonstatic_field(ZPageAllocator, _used, size_t) \
\
nonstatic_field(ZPageTable, _map, ZAddressRangeMapForPageTable) \
\
nonstatic_field(ZAddressRangeMapForPageTable, _map, ZPageTableEntry* const) \
\
nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
\
nonstatic_field(ZForwardingTable, _table, ZForwardingTableEntry*) \
nonstatic_field(ZForwardingTable, _size, size_t) \
\
nonstatic_field(ZPhysicalMemoryManager, _max_capacity, const size_t) \
nonstatic_field(ZPhysicalMemoryManager, _capacity, size_t)
#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \
declare_constant(ZPhaseRelocate) \
declare_constant(ZPageTypeSmall) \
declare_constant(ZPageTypeMedium) \
declare_constant(ZPageTypeLarge) \
declare_constant(ZObjectAlignmentMediumShift) \
declare_constant(ZObjectAlignmentLargeShift)
#define VM_LONG_CONSTANTS_ZGC(declare_constant) \
declare_constant(ZPageSizeSmallShift) \
declare_constant(ZPageSizeMediumShift) \
declare_constant(ZPageSizeMinShift) \
declare_constant(ZAddressOffsetShift) \
declare_constant(ZAddressOffsetBits) \
declare_constant(ZAddressOffsetMask) \
declare_constant(ZAddressSpaceStart)
#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type) \
declare_toplevel_type(ZGlobalsForVMStructs) \
declare_type(ZCollectedHeap, CollectedHeap) \
declare_toplevel_type(ZHeap) \
declare_toplevel_type(ZPage) \
declare_toplevel_type(ZPageAllocator) \
declare_toplevel_type(ZPageTable) \
declare_toplevel_type(ZPageTableEntry) \
declare_toplevel_type(ZAddressRangeMapForPageTable) \
declare_toplevel_type(ZVirtualMemory) \
declare_toplevel_type(ZForwardingTable) \
declare_toplevel_type(ZForwardingTableEntry) \
declare_toplevel_type(ZPhysicalMemoryManager)
#endif // SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP

@ -0,0 +1,48 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "runtime/thread.hpp"
void ZAddressMasks::set_good_mask(uintptr_t mask) {
uintptr_t old_bad_mask = ZAddressBadMask;
ZAddressGoodMask = mask;
ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
}
void ZAddressMasks::initialize() {
ZAddressMetadataMarked = ZAddressMetadataMarked0;
set_good_mask(ZAddressMetadataRemapped);
}
void ZAddressMasks::flip_to_marked() {
ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
set_good_mask(ZAddressMetadataMarked);
}
void ZAddressMasks::flip_to_remapped() {
set_good_mask(ZAddressMetadataRemapped);
}
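A tiny standalone sketch (bit positions from zGlobals_linux_x86.hpp; the constant names are mine) of the XOR arithmetic behind set_good_mask() and flip_to_marked():

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t marked0     = (uintptr_t)1 << 42;
  const uintptr_t marked1     = (uintptr_t)1 << 43;
  const uintptr_t remapped    = (uintptr_t)1 << 44;
  const uintptr_t finalizable = (uintptr_t)1 << 45;
  const uintptr_t metadata_mask = marked0 | marked1 | remapped | finalizable;

  uintptr_t good = remapped;              // as after initialize()
  uintptr_t bad  = good ^ metadata_mask;  // every other metadata bit is bad
  printf("good=0x%lx bad=0x%lx\n", (unsigned long)good, (unsigned long)bad);

  uintptr_t marked = marked0;
  marked ^= (marked0 | marked1);          // the flip_to_marked() alternation
  printf("next marking cycle uses 0x%lx\n", (unsigned long)marked);  // now marked1
  return 0;
}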

@ -0,0 +1,66 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESS_HPP
#define SHARE_GC_Z_ZADDRESS_HPP
#include "memory/allocation.hpp"
class ZAddress : public AllStatic {
public:
static bool is_null(uintptr_t value);
static bool is_bad(uintptr_t value);
static bool is_good(uintptr_t value);
static bool is_good_or_null(uintptr_t value);
static bool is_weak_bad(uintptr_t value);
static bool is_weak_good(uintptr_t value);
static bool is_weak_good_or_null(uintptr_t value);
static bool is_marked(uintptr_t value);
static bool is_finalizable(uintptr_t value);
static bool is_remapped(uintptr_t value);
static uintptr_t address(uintptr_t value);
static uintptr_t offset(uintptr_t value);
static uintptr_t good(uintptr_t value);
static uintptr_t good_or_null(uintptr_t value);
static uintptr_t finalizable_good(uintptr_t value);
static uintptr_t marked(uintptr_t value);
static uintptr_t marked0(uintptr_t value);
static uintptr_t marked1(uintptr_t value);
static uintptr_t remapped(uintptr_t value);
static uintptr_t remapped_or_null(uintptr_t value);
};
class ZAddressMasks : public AllStatic {
friend class ZAddressTest;
private:
static void set_good_mask(uintptr_t mask);
public:
static void initialize();
static void flip_to_marked();
static void flip_to_remapped();
};
#endif // SHARE_GC_Z_ZADDRESS_HPP

@ -0,0 +1,117 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "utilities/macros.hpp"
#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
inline bool ZAddress::is_null(uintptr_t value) {
return value == 0;
}
inline bool ZAddress::is_bad(uintptr_t value) {
return value & ZAddressBadMask;
}
inline bool ZAddress::is_good(uintptr_t value) {
return !is_bad(value) && !is_null(value);
}
inline bool ZAddress::is_good_or_null(uintptr_t value) {
// Checking if an address is "not bad" is an optimized version of
// checking if it's "good or null", which eliminates an explicit
// null check. However, the implicit null check only checks that
// the mask bits are zero, not that the entire address is zero.
// This means that an address without mask bits would pass through
// the barrier as if it was null. This should be harmless as such
// addresses should never be passed through the barrier.
const bool result = !is_bad(value);
assert((is_good(value) || is_null(value)) == result, "Bad address");
return result;
}
inline bool ZAddress::is_weak_bad(uintptr_t value) {
return value & ZAddressWeakBadMask;
}
inline bool ZAddress::is_weak_good(uintptr_t value) {
return !is_weak_bad(value) && !is_null(value);
}
inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
return !is_weak_bad(value);
}
inline bool ZAddress::is_marked(uintptr_t value) {
return value & ZAddressMetadataMarked;
}
inline bool ZAddress::is_finalizable(uintptr_t value) {
return value & ZAddressMetadataFinalizable;
}
inline bool ZAddress::is_remapped(uintptr_t value) {
return value & ZAddressMetadataRemapped;
}
inline uintptr_t ZAddress::offset(uintptr_t value) {
return value & ZAddressOffsetMask;
}
inline uintptr_t ZAddress::good(uintptr_t value) {
return address(offset(value) | ZAddressGoodMask);
}
inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
return is_null(value) ? 0 : good(value);
}
inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
}
inline uintptr_t ZAddress::marked(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked);
}
inline uintptr_t ZAddress::marked0(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked0);
}
inline uintptr_t ZAddress::marked1(uintptr_t value) {
return address(offset(value) | ZAddressMetadataMarked1);
}
inline uintptr_t ZAddress::remapped(uintptr_t value) {
return address(offset(value) | ZAddressMetadataRemapped);
}
inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
return is_null(value) ? 0 : remapped(value);
}
#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
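
Taken together, offset() strips the color and good() re-applies the current one, so a barrier can heal a stale reference without disturbing the underlying heap offset; this is also why the "not bad" test above lets null through the fast path. A self-contained sketch with hypothetical masks (the real offset field is far wider):

#include <cassert>
#include <cstdint>

const uintptr_t OffsetMask = 0x0f;    // hypothetical: real field is much wider
const uintptr_t Marked0    = 0x10;
const uintptr_t Remapped   = 0x40;
const uintptr_t GoodMask   = Marked0; // assume a marking phase
const uintptr_t BadMask    = Remapped;

uintptr_t offset(uintptr_t v) { return v & OffsetMask; }
uintptr_t good(uintptr_t v)   { return offset(v) | GoodMask; }
bool is_bad(uintptr_t v)      { return v & BadMask; }

int main() {
  const uintptr_t stale = 0x7 | Remapped;  // colored by the previous phase
  assert(is_bad(stale));                   // load barrier takes the slow path
  const uintptr_t healed = good(stale);    // re-color, offset is preserved
  assert(!is_bad(healed) && offset(healed) == 0x7);
  assert(!is_bad(0));                      // null also passes the "not bad" test
  return 0;
}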

@ -0,0 +1,63 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
#include "memory/allocation.hpp"
template<typename T, size_t AddressRangeShift>
class ZAddressRangeMapIterator;
template <typename T, size_t AddressRangeShift>
class ZAddressRangeMap {
friend class VMStructs;
friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
private:
T* const _map;
size_t index_for_addr(uintptr_t addr) const;
size_t size() const;
public:
ZAddressRangeMap();
~ZAddressRangeMap();
T get(uintptr_t addr) const;
void put(uintptr_t addr, T value);
};
template <typename T, size_t AddressRangeShift>
class ZAddressRangeMapIterator : public StackObj {
private:
const ZAddressRangeMap<T, AddressRangeShift>* const _map;
size_t _next;
public:
ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
bool next(T* value);
};
#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP

@ -0,0 +1,84 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAddressRangeMap.hpp"
#include "gc/z/zGlobals.hpp"
#include "memory/allocation.inline.hpp"
template <typename T, size_t AddressRangeShift>
ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
_map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
template <typename T, size_t AddressRangeShift>
ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
MmapArrayAllocator<T>::free(_map, size());
}
template <typename T, size_t AddressRangeShift>
size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
assert(!ZAddress::is_null(addr), "Invalid address");
const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
assert(index < size(), "Invalid index");
return index;
}
template <typename T, size_t AddressRangeShift>
size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
return ZAddressOffsetMax >> AddressRangeShift;
}
template <typename T, size_t AddressRangeShift>
T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
const uintptr_t index = index_for_addr(addr);
return _map[index];
}
template <typename T, size_t AddressRangeShift>
void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
const uintptr_t index = index_for_addr(addr);
_map[index] = value;
}
template <typename T, size_t AddressRangeShift>
inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
_map(map),
_next(0) {}
template <typename T, size_t AddressRangeShift>
inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
if (_next < _map->size()) {
*value = _map->_map[_next++];
return true;
}
// End of map
return false;
}
#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
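
The map is effectively a flat array with one slot per 2^AddressRangeShift bytes of heap offset; ZGC uses it for the page table (see ZAddressRangeMapForPageTable above). A simplified stand-in, using plain new[] instead of MmapArrayAllocator and a tiny hypothetical offset space, to show the get/put granularity:

#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename T, size_t Shift>
class RangeMap {
private:
  static const size_t OffsetMax = 1024; // hypothetical ZAddressOffsetMax
  T* const _map;

  size_t index_for_addr(uintptr_t addr) const { return addr >> Shift; }

public:
  RangeMap() : _map(new T[size()]()) {} // zero-initialized
  ~RangeMap() { delete[] _map; }

  size_t size() const { return OffsetMax >> Shift; }
  T get(uintptr_t addr) const { return _map[index_for_addr(addr)]; }
  void put(uintptr_t addr, T value) { _map[index_for_addr(addr)] = value; }
};

int main() {
  RangeMap<int, 4> map;                 // one slot per 16-byte range
  map.put(0x23, 42);                    // 0x20..0x2f share a slot
  assert(map.get(0x2f) == 42);
  assert(map.get(0x30) == 0);           // next range is untouched
  return 0;
}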

@ -0,0 +1,107 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
#include "gc/z/zBitField.hpp"
#include "memory/allocation.hpp"
//
// Allocation flags layout
// -----------------------
//
// 7 4 3 2 1 0
// +---+-+-+-+-+-+
// |000|1|1|1|1|1|
// +---+-+-+-+-+-+
// | | | | | |
// | | | | | * 0-0 Java Thread Flag (1-bit)
// | | | | |
// | | | | * 1-1 Worker Thread Flag (1-bit)
// | | | |
// | | | * 2-2 Non-Blocking Flag (1-bit)
// | | |
// | | * 3-3 Relocation Flag (1-bit)
// | |
// | * 4-4 No Reserve Flag (1-bit)
// |
// * 7-5 Unused (3-bits)
//
class ZAllocationFlags {
private:
typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
uint8_t _flags;
public:
ZAllocationFlags() :
_flags(0) {}
void set_java_thread() {
_flags |= field_java_thread::encode(true);
}
void set_worker_thread() {
_flags |= field_worker_thread::encode(true);
}
void set_non_blocking() {
_flags |= field_non_blocking::encode(true);
}
void set_relocation() {
_flags |= field_relocation::encode(true);
}
void set_no_reserve() {
_flags |= field_no_reserve::encode(true);
}
bool java_thread() const {
return field_java_thread::decode(_flags);
}
bool worker_thread() const {
return field_worker_thread::decode(_flags);
}
bool non_blocking() const {
return field_non_blocking::decode(_flags);
}
bool relocation() const {
return field_relocation::decode(_flags);
}
bool no_reserve() const {
return field_no_reserve::decode(_flags);
}
};
#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
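
Each accessor above is a one-bit ZBitField in the underlying uint8_t. zBitField.hpp is not part of this hunk, so here is a self-contained sketch of the encode/decode idiom, with field positions matching the layout comment:

#include <cassert>
#include <cstdint>

template <typename S, typename F, int Shift, int Bits>
struct BitField {                       // minimal stand-in for ZBitField
  static S encode(F value) {
    return static_cast<S>(static_cast<S>(value) << Shift);
  }
  static F decode(S container) {
    return static_cast<F>((container >> Shift) & ((S(1) << Bits) - 1));
  }
};

typedef BitField<uint8_t, bool, 0, 1> field_java_thread;
typedef BitField<uint8_t, bool, 2, 1> field_non_blocking;

int main() {
  uint8_t flags = 0;
  flags |= field_java_thread::encode(true);
  flags |= field_non_blocking::encode(true);
  assert(field_java_thread::decode(flags));
  assert(field_non_blocking::decode(flags));
  assert(flags == 0x05);                // bits 0 and 2 set
  return 0;
}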

@ -0,0 +1,106 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zWorkers.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
size_t ZArguments::conservative_max_heap_alignment() {
return 0;
}
void ZArguments::initialize() {
GCArguments::initialize();
// Enable NUMA by default
if (FLAG_IS_DEFAULT(UseNUMA)) {
FLAG_SET_DEFAULT(UseNUMA, true);
}
// Disable biased locking by default
if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
FLAG_SET_DEFAULT(UseBiasedLocking, false);
}
// Select number of parallel threads
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
}
if (ParallelGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
}
// Select number of concurrent threads
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
}
if (ConcGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
}
#ifdef COMPILER2
// Enable loop strip mining by default
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
}
}
#endif
// To avoid asserts in set_active_workers()
FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
// CompressedOops/UseCompressedClassPointers not supported
FLAG_SET_DEFAULT(UseCompressedOops, false);
FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
// ClassUnloading not (yet) supported
FLAG_SET_DEFAULT(ClassUnloading, false);
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
// Verification before startup and after exit not (yet) supported
FLAG_SET_DEFAULT(VerifyDuringStartup, false);
FLAG_SET_DEFAULT(VerifyBeforeExit, false);
// Verification of stacks not (yet) supported, for the same reason
// we need fixup_partial_loads
DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
// JVMCI not (yet) supported
if (EnableJVMCI) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
}
}
CollectedHeap* ZArguments::create_heap() {
return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
}
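
Since JEP 333 ships ZGC as an experimental feature, these ergonomics only take effect once the collector is unlocked and selected on the command line, e.g. (heap size and main class are illustrative):

  java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -Xmx16g MyApp

ParallelGCThreads and ConcGCThreads may also be set explicitly; as the checks above show, neither may be zero.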

@ -0,0 +1,38 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
#define SHARE_GC_Z_ZARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class ZArguments : public GCArguments {
public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
#endif // SHARE_GC_Z_ZARGUMENTS_HPP

@ -0,0 +1,87 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARRAY_HPP
#define SHARE_GC_Z_ZARRAY_HPP
#include "memory/allocation.hpp"
template <typename T>
class ZArray {
private:
static const size_t initial_capacity = 32;
T* _array;
size_t _size;
size_t _capacity;
// Copy and assignment are not allowed
ZArray(const ZArray<T>& array);
ZArray<T>& operator=(const ZArray<T>& array);
void expand(size_t new_capacity);
public:
ZArray();
~ZArray();
size_t size() const;
bool is_empty() const;
T at(size_t index) const;
void add(T value);
void clear();
};
template <typename T, bool parallel>
class ZArrayIteratorImpl : public StackObj {
private:
ZArray<T>* const _array;
size_t _next;
public:
ZArrayIteratorImpl(ZArray<T>* array);
bool next(T* elem);
};
// Iterator types
#define ZARRAY_SERIAL false
#define ZARRAY_PARALLEL true
template <typename T>
class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
public:
ZArrayIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
};
template <typename T>
class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
public:
ZArrayParallelIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
};
#endif // SHARE_GC_Z_ZARRAY_HPP

@ -0,0 +1,111 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
#define SHARE_GC_Z_ZARRAY_INLINE_HPP
#include "gc/z/zArray.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
template <typename T>
inline ZArray<T>::ZArray() :
_array(NULL),
_size(0),
_capacity(0) {}
template <typename T>
inline ZArray<T>::~ZArray() {
if (_array != NULL) {
FREE_C_HEAP_ARRAY(T, _array);
}
}
template <typename T>
inline size_t ZArray<T>::size() const {
return _size;
}
template <typename T>
inline bool ZArray<T>::is_empty() const {
return size() == 0;
}
template <typename T>
inline T ZArray<T>::at(size_t index) const {
assert(index < _size, "Index out of bounds");
return _array[index];
}
template <typename T>
inline void ZArray<T>::expand(size_t new_capacity) {
T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
if (_array != NULL) {
memcpy(new_array, _array, sizeof(T) * _capacity);
FREE_C_HEAP_ARRAY(T, _array);
}
_array = new_array;
_capacity = new_capacity;
}
template <typename T>
inline void ZArray<T>::add(T value) {
if (_size == _capacity) {
const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
expand(new_capacity);
}
_array[_size++] = value;
}
template <typename T>
inline void ZArray<T>::clear() {
_size = 0;
}
template <typename T, bool parallel>
inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
_array(array),
_next(0) {}
template <typename T, bool parallel>
inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
if (parallel) {
const size_t next = Atomic::add(1u, &_next) - 1u;
if (next < _array->size()) {
*elem = _array->at(next);
return true;
}
} else {
if (_next < _array->size()) {
*elem = _array->at(_next++);
return true;
}
}
// No more elements
return false;
}
#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP
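
The parallel iterator flavor relies on Atomic::add handing out each index exactly once across threads. A self-contained sketch of the same claiming protocol, with std::atomic and std::thread standing in for the HotSpot primitives:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>
#include <vector>

int main() {
  std::vector<int> array(1000, 1);
  std::atomic<size_t> next(0);
  std::atomic<long> sum(0);

  auto work = [&]() {
    for (;;) {
      const size_t i = next.fetch_add(1); // claim one index
      if (i >= array.size()) {
        return;                           // no more elements
      }
      sum += array[i];
    }
  };

  std::thread t1(work), t2(work);
  t1.join();
  t2.join();
  assert(sum == 1000);                    // each element visited exactly once
  return 0;
}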

@ -0,0 +1,270 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
bool ZBarrier::during_mark() {
return ZGlobalPhase == ZPhaseMark;
}
bool ZBarrier::during_relocate() {
return ZGlobalPhase == ZPhaseRelocate;
}
template <bool finalizable>
bool ZBarrier::should_mark_through(uintptr_t addr) {
// Finalizable marked oops can still exist on the heap after marking
// has completed, in which case we just want to convert this into a
// good oop and not push it on the mark stack.
if (!during_mark()) {
assert(ZAddress::is_marked(addr), "Should be marked");
assert(ZAddress::is_finalizable(addr), "Should be finalizable");
return false;
}
// During marking, we mark through already marked oops to avoid having
// some large part of the object graph hidden behind a pushed, but not
// yet flushed, entry on a mutator mark stack. Always marking through
// allows the GC workers to proceed through the object graph even if a
// mutator touched an oop first, which in turn will reduce the risk of
// having to flush mark stacks multiple times to terminate marking.
//
// However, when doing finalizable marking we don't always want to mark
// through. First, marking through an already strongly marked oop would
// be wasteful, since we will then proceed to do finalizable marking on
// an object which is, or will be, marked strongly. Second, marking
// through an already finalizable marked oop would also be wasteful,
// since such oops can never end up on a mutator mark stack and can
// therefore not hide some part of the object graph from GC workers.
if (finalizable) {
return !ZAddress::is_marked(addr);
}
// Mark through
return true;
}
template <bool finalizable, bool publish>
uintptr_t ZBarrier::mark(uintptr_t addr) {
uintptr_t good_addr;
if (ZAddress::is_marked(addr)) {
// Already marked, but try to mark through anyway
good_addr = ZAddress::good(addr);
} else if (ZAddress::is_remapped(addr)) {
// Already remapped, but also needs to be marked
good_addr = ZAddress::good(addr);
} else {
// Needs to be both remapped and marked
good_addr = remap(addr);
}
// Mark
if (should_mark_through<finalizable>(addr)) {
ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
}
return good_addr;
}
uintptr_t ZBarrier::remap(uintptr_t addr) {
assert(!ZAddress::is_good(addr), "Should not be good");
assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
if (ZHeap::heap()->is_relocating(addr)) {
// Forward
return ZHeap::heap()->forward_object(addr);
}
// Remap
return ZAddress::good(addr);
}
uintptr_t ZBarrier::relocate(uintptr_t addr) {
assert(!ZAddress::is_good(addr), "Should not be good");
assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
if (ZHeap::heap()->is_relocating(addr)) {
// Relocate
return ZHeap::heap()->relocate_object(addr);
}
// Remap
return ZAddress::good(addr);
}
uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
}
uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
return during_relocate() ? relocate(addr) : remap(addr);
}
//
// Load barrier
//
uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
return relocate_or_mark(addr);
}
void ZBarrier::load_barrier_on_oop_fields(oop o) {
assert(ZOop::is_good(o), "Should be good");
ZLoadBarrierOopClosure cl;
o->oop_iterate(&cl);
}
//
// Weak load barrier
//
uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr);
}
uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (ZHeap::heap()->is_object_strongly_live(good_addr)) {
return good_addr;
}
// Not strongly live
return 0;
}
uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (ZHeap::heap()->is_object_live(good_addr)) {
return good_addr;
}
// Not live
return 0;
}
//
// Keep alive barrier
//
uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
return good_addr;
}
uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(ZHeap::heap()->is_object_live(good_addr), "Should be live");
return good_addr;
}
//
// Mark barrier
//
uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
return mark<Strong, Overflow>(addr);
}
uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = mark<Finalizable, Overflow>(addr);
if (ZAddress::is_good(addr)) {
// If the oop was already strongly marked/good, then we do
// not want to downgrade it to finalizable marked/good.
return good_addr;
}
// Make the oop finalizable marked/good, instead of normal marked/good.
// This is needed because an object might first become finalizable
// marked by the GC, and then loaded by a mutator thread. In this case,
// the mutator thread must be able to tell that the object needs to be
// strongly marked. The finalizable bit in the oop exists to make sure
// that a load of a finalizable marked oop will fall into the barrier
// slow path so that we can mark the object as strongly reachable.
return ZAddress::finalizable_good(good_addr);
}
uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
assert(during_mark(), "Invalid phase");
// Mark
return mark<Strong, Publish>(addr);
}
//
// Relocate barrier
//
uintptr_t ZBarrier::relocate_barrier_on_root_oop_slow_path(uintptr_t addr) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
assert(during_relocate(), "Invalid phase");
// Relocate
return relocate(addr);
}
//
// Narrow oop variants, never used.
//
oop ZBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
void ZBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
ShouldNotReachHere();
}
oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return NULL;
}
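
The finalizable-bit trick described in mark_barrier_on_finalizable_oop_slow_path() works because that bit is part of the bad mask: a finalizable-good oop still fails the fast-path test, so a later strong load reliably reaches the slow path and can upgrade the object. A sketch with hypothetical bit values:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t Marked0      = 0x10;  // hypothetical positions
  const uintptr_t Marked1      = 0x20;
  const uintptr_t Remapped     = 0x40;
  const uintptr_t Finalizable  = 0x80;
  const uintptr_t MetadataMask = Marked0 | Marked1 | Remapped | Finalizable;

  const uintptr_t GoodMask = Marked0;   // assume a marking phase
  const uintptr_t BadMask  = GoodMask ^ MetadataMask;

  const uintptr_t fin_good = 0x7 | Finalizable | GoodMask;
  assert((fin_good & BadMask) != 0);    // still "bad": forces the slow path
  const uintptr_t strong_good = 0x7 | GoodMask;  // after strong marking
  assert((strong_good & BadMask) == 0); // now passes the fast path
  return 0;
}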

@ -0,0 +1,121 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIER_HPP
#define SHARE_GC_Z_ZBARRIER_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
typedef bool (*ZBarrierFastPath)(uintptr_t);
typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t);
class ZBarrier : public AllStatic {
private:
static const bool Strong = false;
static const bool Finalizable = true;
static const bool Publish = true;
static const bool Overflow = false;
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
static bool is_null_fast_path(uintptr_t addr);
static bool is_good_or_null_fast_path(uintptr_t addr);
static bool is_weak_good_or_null_fast_path(uintptr_t addr);
static bool is_resurrection_blocked(volatile oop* p, oop* o);
static bool during_mark();
static bool during_relocate();
template <bool finalizable> static bool should_mark_through(uintptr_t addr);
template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
static uintptr_t remap(uintptr_t addr);
static uintptr_t relocate(uintptr_t addr);
static uintptr_t relocate_or_mark(uintptr_t addr);
static uintptr_t relocate_or_remap(uintptr_t addr);
static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
public:
// Load barrier
static oop load_barrier_on_oop(oop o);
static oop load_barrier_on_oop_field(volatile oop* p);
static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static void load_barrier_on_oop_array(volatile oop* p, size_t length);
static void load_barrier_on_oop_fields(oop o);
static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
// Weak load barrier
static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_weak_oop(oop o);
static oop weak_load_barrier_on_weak_oop_field(volatile oop* p);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_phantom_oop(oop o);
static oop weak_load_barrier_on_phantom_oop_field(volatile oop* p);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
// Is alive barrier
static bool is_alive_barrier_on_weak_oop(oop o);
static bool is_alive_barrier_on_phantom_oop(oop o);
// Keep alive barrier
static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
// Mark barrier
static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
static void mark_barrier_on_root_oop_field(oop* p);
// Relocate barrier
static void relocate_barrier_on_root_oop_field(oop* p);
// Narrow oop variants, never used.
static oop load_barrier_on_oop_field(volatile narrowOop* p);
static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
};
#endif // SHARE_GC_Z_ZBARRIER_HPP
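
A design note on the template shape above: fast_path and slow_path are non-type template parameters, so each barrier flavor is stamped out as its own function with statically resolved, inlinable calls rather than indirect calls through function-pointer variables. A minimal sketch of the pattern:

#include <cassert>
#include <cstdint>

typedef bool (*FastPath)(uintptr_t);
typedef uintptr_t (*SlowPath)(uintptr_t);

bool is_even(uintptr_t v)        { return (v & 1) == 0; }
uintptr_t make_even(uintptr_t v) { return v & ~uintptr_t(1); }

template <FastPath fast_path, SlowPath slow_path>
uintptr_t barrier(uintptr_t v) {
  if (fast_path(v)) {  // resolved at compile time, trivially inlined
    return v;
  }
  return slow_path(v);
}

int main() {
  assert(barrier<is_even, make_even>(7) == 6);
  assert(barrier<is_even, make_even>(8) == 8);
  return 0;
}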

@ -0,0 +1,300 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "runtime/atomic.hpp"
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
uintptr_t addr = ZOop::to_address(o);
retry:
// Fast path
if (fast_path(addr)) {
return ZOop::to_oop(addr);
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
// Self heal, but only if the address was actually updated by the slow path,
// which might not be the case, e.g. when marking through an already good oop.
if (p != NULL && good_addr != addr) {
const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
if (prev_addr != addr) {
// Some other thread overwrote the oop. If this oop was updated by a
// weak barrier the new oop might not be good, in which case we need
// to re-apply this barrier.
addr = prev_addr;
goto retry;
}
}
return ZOop::to_oop(good_addr);
}
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
const uintptr_t addr = ZOop::to_address(o);
// Fast path
if (fast_path(addr)) {
// Return the good address instead of the weak good address
// to ensure that the currently active heap view is used.
return ZOop::to_oop(ZAddress::good_or_null(addr));
}
// Slow path
uintptr_t good_addr = slow_path(addr);
// Self heal unless the address returned from the slow path is null,
// in which case resurrection was blocked and we must let the reference
// processor clear the oop. Mutators are not allowed to clear oops in
// these cases, since that would be similar to calling Reference.clear(),
// which would make the reference non-discoverable or silently dropped
// by the reference processor.
if (p != NULL && good_addr != 0) {
// The slow path returns a good/marked address, but we never mark oops
// in a weak load barrier so we always self heal with the remapped address.
const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
if (prev_addr != addr) {
// Some other thread overwrote the oop. The new
// oop is guaranteed to be weak good or null.
assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");
// Return the good address instead of the weak good address
// to ensure that the currently active heap view is used.
good_addr = ZAddress::good_or_null(prev_addr);
}
}
return ZOop::to_oop(good_addr);
}
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
const uintptr_t addr = ZOop::to_address(o);
// Fast path
if (fast_path(addr)) {
return;
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
// Non-atomic healing helps speed up root scanning. This is safe to do
// since we are always healing roots in a safepoint, which means we are
// never racing with mutators modifying roots while we are healing them.
// It's also safe in case multiple GC threads try to heal the same root,
// since they would always heal the root in the same way and it does not
// matter in which order it happens.
*p = ZOop::to_oop(good_addr);
}
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
return ZAddress::is_null(addr);
}
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
return ZAddress::is_good_or_null(addr);
}
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
return ZAddress::is_weak_good_or_null(addr);
}
inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
const bool is_blocked = ZResurrection::is_blocked();
// Reload oop after checking the resurrection blocked state. This is
// done to prevent a race where we first load an oop, which is logically
// null but not yet cleared, then this oop is cleared by the reference
// processor and resurrection is unblocked. At this point the mutator
// would see the unblocked state and pass this invalid oop through the
// normal barrier path, which would incorrectly try to mark this oop.
if (p != NULL) {
// First assign to reloaded_o to avoid compiler warning about
// implicit dereference of volatile oop.
const oop reloaded_o = *p;
*o = reloaded_o;
}
return is_blocked;
}
//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
const oop o = *p;
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
for (volatile const oop* const end = p + length; p < end; p++) {
load_barrier_on_oop_field(p);
}
}
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
const oop o = *p;
return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
const oop o = *p;
return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (is_resurrection_blocked(p, &o)) {
return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_weak_oop(o) != NULL;
}
inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_phantom_oop(o) != NULL;
}
//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
// This operation is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
const oop o = *p;
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}
inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
// This operation is only valid when resurrection is blocked.
assert(ZResurrection::is_blocked(), "Invalid phase");
const oop o = *p;
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}
//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
// The fast path only checks for null since the GC worker
// threads doing marking want to mark through good oops.
const oop o = *p;
if (finalizable) {
barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
} else {
barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
}
}
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
for (volatile const oop* const end = p + length; p < end; p++) {
mark_barrier_on_oop_field(p, finalizable);
}
}
inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}
//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}
#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP
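
The retry loop in barrier() above is the heart of the self-healing design: test the color, take the slow path if needed, publish the healed value with a CAS, and re-apply the barrier if another thread raced us. A self-contained sketch with hypothetical masks, where std::atomic stands in for Atomic::cmpxchg:

#include <atomic>
#include <cassert>
#include <cstdint>

const uintptr_t BadMask  = 0x10;  // hypothetical stale color
const uintptr_t GoodMask = 0x20;

bool is_good_or_null(uintptr_t a) { return (a & BadMask) == 0; }
uintptr_t heal(uintptr_t a)       { return (a & ~BadMask) | GoodMask; }

uintptr_t load_barrier(std::atomic<uintptr_t>* p) {
  uintptr_t addr = p->load();
  for (;;) {
    if (is_good_or_null(addr)) {  // fast path: already good (or null)
      return addr;
    }
    const uintptr_t good_addr = heal(addr);  // slow path
    // Self heal: publish the good address so later loads take the fast
    // path. On CAS failure, addr is reloaded and the barrier re-applied.
    if (p->compare_exchange_strong(addr, good_addr)) {
      return good_addr;
    }
  }
}

int main() {
  std::atomic<uintptr_t> field(0x7 | BadMask);
  assert(load_barrier(&field) == (0x7 | GoodMask));
  assert(is_good_or_null(field.load()));  // field was healed in place
  return 0;
}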

@ -0,0 +1,82 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "runtime/thread.hpp"
ZBarrierSet::ZBarrierSet() :
BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
make_barrier_set_c1<ZBarrierSetC1>(),
make_barrier_set_c2<ZBarrierSetC2>(),
BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
ZBarrierSetAssembler* ZBarrierSet::assembler() {
BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
return reinterpret_cast<ZBarrierSetAssembler*>(bsa);
}
bool ZBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
assert((decorators & AS_RAW) == 0, "Unexpected decorator");
assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator");
assert((decorators & IN_ARCHIVE_ROOT) == 0, "Unexpected decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
if (type == T_OBJECT || type == T_ARRAY) {
if (((decorators & IN_HEAP) != 0) ||
((decorators & IN_CONCURRENT_ROOT) != 0) ||
((decorators & ON_PHANTOM_OOP_REF) != 0)) {
// Barrier needed
return true;
}
}
// Barrier not needed
return false;
}
void ZBarrierSet::on_thread_create(Thread* thread) {
// Create thread local data
ZThreadLocalData::create(thread);
}
void ZBarrierSet::on_thread_destroy(Thread* thread) {
// Destroy thread local data
ZThreadLocalData::destroy(thread);
}
void ZBarrierSet::on_thread_attach(JavaThread* thread) {
// Set thread local address bad mask
ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
}
void ZBarrierSet::on_thread_detach(JavaThread* thread) {
// Flush and free any remaining mark stacks
ZHeap::heap()->mark_flush_and_free(thread);
}
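
on_thread_attach() exists because the compiled load barrier tests loaded references against a per-thread copy of ZAddressBadMask rather than the global, keeping the fast path to a single test-and-branch. A sketch of the shape of that check (field name and mask value hypothetical):

#include <cstdint>

struct ThreadLocalData {               // stand-in for ZThreadLocalData
  uintptr_t address_bad_mask;          // refreshed whenever the masks flip
};

inline bool needs_slow_path(const ThreadLocalData* tld, uintptr_t ref) {
  return (ref & tld->address_bad_mask) != 0;  // one AND + branch when compiled
}

int main() {
  ThreadLocalData tld = { 0x10 };             // hypothetical bad mask
  return needs_slow_path(&tld, 0x7 | 0x10) ? 0 : 1;  // stale ref -> slow path
}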

@ -0,0 +1,109 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSET_HPP
#define SHARE_GC_Z_ZBARRIERSET_HPP
#include "gc/shared/barrierSet.hpp"
class ZBarrierSetAssembler;
class ZBarrierSet : public BarrierSet {
public:
ZBarrierSet();
static ZBarrierSetAssembler* assembler();
static bool barrier_needed(DecoratorSet decorators, BasicType type);
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
virtual void print_on(outputStream* st) const {}
template <DecoratorSet decorators, typename BarrierSetT = ZBarrierSet>
class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
private:
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
template <DecoratorSet expected>
static void verify_decorators_present();
template <DecoratorSet expected>
static void verify_decorators_absent();
static oop* field_addr(oop base, ptrdiff_t offset);
template <typename T>
static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
template <typename T>
static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
public:
//
// In heap
//
template <typename T>
static oop oop_load_in_heap(T* addr);
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
template <typename T>
static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length);
static void clone_in_heap(oop src, oop dst, size_t size);
//
// Not in heap
//
template <typename T>
static oop oop_load_not_in_heap(T* addr);
template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
template <typename T>
static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
};
};
template<> struct BarrierSet::GetName<ZBarrierSet> {
static const BarrierSet::Name value = BarrierSet::ZBarrierSet;
};
template<> struct BarrierSet::GetType<BarrierSet::ZBarrierSet> {
typedef ::ZBarrierSet type;
};
#endif // SHARE_GC_Z_ZBARRIERSET_HPP

@ -0,0 +1,243 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
#define SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
#include "gc/shared/accessBarrierSupport.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "utilities/debug.hpp"
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
if ((decorators & expected) == 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
if ((decorators & expected) != 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop* ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
assert(base != NULL, "Invalid base");
return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) {
verify_decorators_present<ON_UNKNOWN_OOP_REF>();
const DecoratorSet decorators_known_strength =
AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
//
// In heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
const oop o = Raw::oop_load_in_heap(addr);
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
oop* const addr = field_addr(base, offset);
const oop o = Raw::oop_load_in_heap(addr);
if (HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o);
}
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
ZBarrier::load_barrier_on_oop_field(addr);
return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
// Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
// calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.
ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
return ZBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
return ZBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length) {
T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// No check cast, bulk barrier and bulk copy
ZBarrier::load_barrier_on_oop_array(src, length);
return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length);
}
// Check cast and copy each element
Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
for (const T* const end = src + length; src < end; src++, dst++) {
const oop elem = ZBarrier::load_barrier_on_oop_field(src);
if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) {
// Check cast failed
return false;
}
// Cast is safe, since we know it's never a narrowOop
*(oop*)dst = elem;
}
return true;
}
template <DecoratorSet decorators, typename BarrierSetT>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
ZBarrier::load_barrier_on_oop_fields(src);
Raw::clone_in_heap(src, dst, size);
}
//
// Not in heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
const oop o = Raw::oop_load_not_in_heap(addr);
if (HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value) {
return load_barrier_on_oop_field_preloaded(addr, o);
}
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return o;
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
}
#endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP

@ -0,0 +1,35 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "runtime/thread.hpp"
Address ZBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) {
return Address(thread, ZThreadLocalData::address_bad_mask_offset());
}
Address ZBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) {
return Address(env, ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset());
}

@ -0,0 +1,41 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
#define SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "oops/accessDecorators.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
class ZBarrierSetAssemblerBase : public BarrierSetAssembler {
public:
static Address address_bad_mask_from_thread(Register thread);
static Address address_bad_mask_from_jni_env(Register env);
};
#include CPU_HEADER(gc/z/zBarrierSetAssembler)
#endif // SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP

@ -0,0 +1,69 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "runtime/interfaceSupport.inline.hpp"
JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p))
return ZBarrier::load_barrier_on_oop_field_preloaded(p, o);
JRT_END
JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p))
return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o);
JRT_END
JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p))
return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o);
JRT_END
JRT_LEAF(void, ZBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length))
ZBarrier::load_barrier_on_oop_array(p, length);
JRT_END
address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) {
if (decorators & ON_PHANTOM_OOP_REF) {
return load_barrier_on_phantom_oop_field_preloaded_addr();
} else if (decorators & ON_WEAK_OOP_REF) {
return load_barrier_on_weak_oop_field_preloaded_addr();
} else {
return load_barrier_on_oop_field_preloaded_addr();
}
}
address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() {
return reinterpret_cast<address>(load_barrier_on_oop_field_preloaded);
}
address ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() {
return reinterpret_cast<address>(load_barrier_on_weak_oop_field_preloaded);
}
address ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() {
return reinterpret_cast<address>(load_barrier_on_phantom_oop_field_preloaded);
}
address ZBarrierSetRuntime::load_barrier_on_oop_array_addr() {
return reinterpret_cast<address>(load_barrier_on_oop_array);
}

@ -0,0 +1,48 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
#define SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
#include "memory/allocation.hpp"
#include "oops/accessDecorators.hpp"
#include "utilities/globalDefinitions.hpp"
class oopDesc;
class ZBarrierSetRuntime : public AllStatic {
private:
static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p);
static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p);
static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p);
static void load_barrier_on_oop_array(oop* p, size_t length);
public:
static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators);
static address load_barrier_on_oop_field_preloaded_addr();
static address load_barrier_on_weak_oop_field_preloaded_addr();
static address load_barrier_on_phantom_oop_field_preloaded_addr();
static address load_barrier_on_oop_array_addr();
};
#endif // SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP

@ -0,0 +1,80 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBITFIELD_HPP
#define SHARE_GC_Z_ZBITFIELD_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
//
// Example
// -------
//
// typedef ZBitField<uint64_t, uint8_t, 0, 2, 3> field_word_aligned_size;
// typedef ZBitField<uint64_t, uint32_t, 2, 30> field_length;
//
//
// 6 3 3
// 3 2 1 2 10
// +-----------------------------------+---------------------------------+--+
// |11111111 11111111 11111111 11111111|11111111 11111111 11111111 111111|11|
// +-----------------------------------+---------------------------------+--+
// | | |
// | 31-2 field_length (30-bits) * |
// | |
// | 1-0 field_word_aligned_size (2-bits) *
// |
// * 63-32 Unused (32-bits)
//
//
// field_word_aligned_size::encode(16) = 2
// field_length::encode(2342) = 9368
//
// field_word_aligned_size::decode(9368 | 2) = 16
// field_length::decode(9368 | 2) = 2342
//
template <typename ContainerType, typename ValueType, int FieldShift, int FieldBits, int ValueShift = 0>
class ZBitField : public AllStatic {
private:
static const int ContainerBits = sizeof(ContainerType) * BitsPerByte;
STATIC_ASSERT(FieldBits < ContainerBits);
STATIC_ASSERT(FieldShift + FieldBits <= ContainerBits);
STATIC_ASSERT(ValueShift + FieldBits <= ContainerBits);
static const ContainerType FieldMask = (((ContainerType)1 << FieldBits) - 1);
public:
static ValueType decode(ContainerType container) {
return (ValueType)(((container >> FieldShift) & FieldMask) << ValueShift);
}
static ContainerType encode(ValueType value) {
assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value");
return ((ContainerType)value >> ValueShift) << FieldShift;
}
};
#endif // SHARE_GC_Z_ZBITFIELD_HPP
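To make the bit layout above concrete, here is a minimal standalone sketch (not part of this commit; plain C++ with hypothetical names) that mirrors the encode/decode arithmetic documented in the ZBitField comment, using the same example fields and values:

#include <cassert>
#include <cstdint>

// Mirrors ZBitField's shifts and masks for the documented example:
//   field_word_aligned_size: FieldShift=0, FieldBits=2,  ValueShift=3
//   field_length:            FieldShift=2, FieldBits=30, ValueShift=0
int main() {
  const uint64_t size_encoded   = (uint64_t(16) >> 3) << 0;    // encode(16)   == 2
  const uint64_t length_encoded = (uint64_t(2342) >> 0) << 2;  // encode(2342) == 9368
  const uint64_t container      = size_encoded | length_encoded; // == 9370

  assert((((container >> 0) & 0x3) << 3) == 16);                // decode size
  assert((((container >> 2) & 0x3FFFFFFF) << 0) == 2342);       // decode length
  return 0;
}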

@ -0,0 +1,42 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBITMAP_HPP
#define SHARE_GC_Z_ZBITMAP_HPP
#include "utilities/bitMap.hpp"
class ZBitMap : public CHeapBitMap {
private:
static bm_word_t bit_mask_pair(idx_t bit);
bool par_set_bit_pair_finalizable(idx_t bit, bool& inc_live);
bool par_set_bit_pair_strong(idx_t bit, bool& inc_live);
public:
ZBitMap(idx_t size_in_bits);
bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live);
};
#endif // SHARE_GC_Z_ZBITMAP_HPP

@ -0,0 +1,75 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZBITMAP_INLINE_HPP
#define SHARE_GC_Z_ZBITMAP_INLINE_HPP
#include "gc/z/zBitMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
inline ZBitMap::ZBitMap(idx_t size_in_bits) :
CHeapBitMap(size_in_bits, mtGC, false /* clear */) {}
inline BitMap::bm_word_t ZBitMap::bit_mask_pair(idx_t bit) {
assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index");
return (bm_word_t)3 << bit_in_word(bit);
}
inline bool ZBitMap::par_set_bit_pair_finalizable(idx_t bit, bool& inc_live) {
inc_live = par_set_bit(bit);
return inc_live;
}
inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) {
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t pair_mask = bit_mask_pair(bit);
bm_word_t old_val = *addr;
do {
const bm_word_t new_val = old_val | pair_mask;
if (new_val == old_val) {
inc_live = false;
return false; // Someone else beat us to it.
}
const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
if (cur_val == old_val) {
const bm_word_t marked_mask = bit_mask(bit);
inc_live = !(old_val & marked_mask);
return true; // Success.
}
old_val = cur_val; // The value changed, try again.
} while (true);
}
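The loop above is a standard compare-and-swap retry loop. As a rough standalone illustration of the same pattern (an assumed sketch using std::atomic, not the HotSpot Atomic API; names are illustrative):

#include <atomic>
#include <cstdint>

// Atomically OR a two-bit pair into a word, reporting via inc_live whether
// the low (live) bit was newly set by this call.
bool par_or_pair(std::atomic<uint64_t>& word, unsigned bit_in_word, bool& inc_live) {
  const uint64_t pair_mask = uint64_t(3) << bit_in_word;
  uint64_t old_val = word.load(std::memory_order_relaxed);
  for (;;) {
    const uint64_t new_val = old_val | pair_mask;
    if (new_val == old_val) {
      inc_live = false;
      return false;                       // Both bits already set: lost the race
    }
    if (word.compare_exchange_weak(old_val, new_val)) {
      inc_live = !(old_val & (uint64_t(1) << bit_in_word)); // Live bit 0 -> 1?
      return true;
    }
    // compare_exchange_weak refreshed old_val on failure; retry
  }
}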
inline bool ZBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live) {
if (finalizable) {
return par_set_bit_pair_finalizable(bit, inc_live);
} else {
return par_set_bit_pair_strong(bit, inc_live);
}
}
#endif // SHARE_GC_Z_ZBITMAP_INLINE_HPP

@ -0,0 +1,74 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zCPU.hpp"
#include "logging/log.hpp"
#include "memory/padded.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp"
#define ZCPU_UNKNOWN_AFFINITY ((Thread*)-1)
#define ZCPU_UNKNOWN_SELF     ((Thread*)-2)
PaddedEnd<ZCPU::ZCPUAffinity>* ZCPU::_affinity = NULL;
__thread Thread* ZCPU::_self = ZCPU_UNKNOWN_SELF;
__thread uint32_t ZCPU::_cpu = 0;
void ZCPU::initialize() {
assert(_affinity == NULL, "Already initialized");
const uint32_t ncpus = count();
_affinity = PaddedArray<ZCPUAffinity, mtGC>::create_unfreeable(ncpus);
for (uint32_t i = 0; i < ncpus; i++) {
_affinity[i]._thread = ZCPU_UNKNOWN_AFFINITY;
}
log_info(gc, init)("CPUs: %u total, %u available",
os::processor_count(),
os::initial_active_processor_count());
}
uint32_t ZCPU::count() {
return os::processor_count();
}
uint32_t ZCPU::id() {
assert(_affinity != NULL, "Not initialized");
// Fast path
if (_affinity[_cpu]._thread == _self) {
return _cpu;
}
// Slow path
_self = Thread::current();
_cpu = os::processor_id();
// Update affinity table
_affinity[_cpu]._thread = _self;
return _cpu;
}
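For reference, a hypothetical standalone mirror of the fast-path/slow-path caching above (illustrative names and table size, glibc's sched_getcpu(); like the original, the affinity table is written without synchronization, since a stale entry only costs one extra slow-path lookup):

#include <sched.h>   // sched_getcpu(), glibc
#include <thread>
#include <vector>

static std::vector<std::thread::id> affinity(1024);  // Per-CPU owner slot (assumed size)
static thread_local std::thread::id self;            // Default-constructed: "unknown"
static thread_local unsigned cpu = 0;

unsigned cpu_id() {
  // Fast path: the CPU we saw last time still names this thread as its owner
  if (self != std::thread::id() && affinity[cpu] == self) {
    return cpu;
  }
  // Slow path: re-query the OS and publish the new affinity
  self = std::this_thread::get_id();
  cpu = (unsigned)sched_getcpu();
  affinity[cpu] = self;
  return cpu;
}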

@ -0,0 +1,49 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZCPU_HPP
#define SHARE_GC_Z_ZCPU_HPP
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
class Thread;
class ZCPU : public AllStatic {
private:
struct ZCPUAffinity {
Thread* _thread;
};
static PaddedEnd<ZCPUAffinity>* _affinity;
static __thread Thread* _self;
static __thread uint32_t _cpu;
public:
static void initialize();
static uint32_t count();
static uint32_t id();
};
#endif // SHARE_GC_Z_ZCPU_HPP

@ -0,0 +1,348 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"
ZCollectedHeap* ZCollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
assert(heap->kind() == CollectedHeap::Z, "Invalid name");
return (ZCollectedHeap*)heap;
}
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
_collector_policy(policy),
_soft_ref_policy(),
_barrier_set(),
_initialize(&_barrier_set),
_heap(),
_director(new ZDirector()),
_driver(new ZDriver()),
_stat(new ZStat()),
_runtime_workers() {}
CollectedHeap::Name ZCollectedHeap::kind() const {
return CollectedHeap::Z;
}
const char* ZCollectedHeap::name() const {
return ZGCName;
}
jint ZCollectedHeap::initialize() {
if (!_heap.is_initialized()) {
return JNI_ENOMEM;
}
initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
(HeapWord*)ZAddressReservedEnd());
return JNI_OK;
}
void ZCollectedHeap::initialize_serviceability() {
_heap.serviceability_initialize();
}
void ZCollectedHeap::stop() {
_director->stop();
_driver->stop();
_stat->stop();
}
CollectorPolicy* ZCollectedHeap::collector_policy() const {
return _collector_policy;
}
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
size_t ZCollectedHeap::max_capacity() const {
return _heap.max_capacity();
}
size_t ZCollectedHeap::capacity() const {
return _heap.capacity();
}
size_t ZCollectedHeap::used() const {
return _heap.used();
}
bool ZCollectedHeap::is_maximal_no_gc() const {
// Not supported
ShouldNotReachHere();
return false;
}
bool ZCollectedHeap::is_scavengable(oop obj) {
return false;
}
bool ZCollectedHeap::is_in(const void* p) const {
return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}
bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
return is_in(p);
}
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);
if (addr != 0) {
*actual_size = requested_size;
}
return (HeapWord*)addr;
}
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
return (HeapWord*)_heap.alloc_object(size_in_bytes);
}
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype) {
MetaWord* result;
// Start asynchronous GC
collect(GCCause::_metadata_GC_threshold);
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
return result;
}
// Start synchronous GC
collect(GCCause::_metadata_GC_clear_soft_refs);
// Retry allocation
result = loader_data->metaspace_non_null()->allocate(size, mdtype);
if (result != NULL) {
return result;
}
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
return result;
}
// Out of memory
return NULL;
}
void ZCollectedHeap::collect(GCCause::Cause cause) {
_driver->collect(cause);
}
void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
// These collection requests are ignored since ZGC can't run a synchronous
// GC cycle from within the VM thread. This is considered benign, since the
// only GC causes coming in here should be heap dumper and heap inspector.
// However, neither the heap dumper nor the heap inspector really needs a GC
// to happen; the result of their heap iterations might in that case just be
// less accurate, since they might include objects that would otherwise have
// been collected by a GC.
assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
guarantee(cause == GCCause::_heap_dump ||
cause == GCCause::_heap_inspection, "Invalid cause");
}
void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
// Not supported
ShouldNotReachHere();
}
bool ZCollectedHeap::supports_tlab_allocation() const {
return true;
}
size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
return _heap.tlab_capacity();
}
size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
return _heap.tlab_used();
}
size_t ZCollectedHeap::max_tlab_size() const {
return _heap.max_tlab_size();
}
size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
return _heap.unsafe_max_tlab_alloc();
}
bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
return false;
}
bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
// Not supported
ShouldNotReachHere();
return true;
}
bool ZCollectedHeap::card_mark_must_follow_store() const {
// Not supported
ShouldNotReachHere();
return false;
}
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}
GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl);
}
void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl);
}
HeapWord* ZCollectedHeap::block_start(const void* addr) const {
return (HeapWord*)_heap.block_start((uintptr_t)addr);
}
size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
return ZUtils::bytes_to_words(size_in_bytes);
}
bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
return _heap.block_is_obj((uintptr_t)addr);
}
void ZCollectedHeap::register_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
ZNMethodTable::register_nmethod(nm);
}
void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
ZNMethodTable::unregister_nmethod(nm);
}
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
// Does nothing
}
WorkGang* ZCollectedHeap::get_safepoint_workers() {
return _runtime_workers.workers();
}
jlong ZCollectedHeap::millis_since_last_gc() {
return ZStatCycle::time_since_last() / MILLIUNITS;
}
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
tc->do_thread(_director);
tc->do_thread(_driver);
tc->do_thread(_stat);
_heap.worker_threads_do(tc);
_runtime_workers.threads_do(tc);
}
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
const size_t capacity_in_words = capacity() / HeapWordSize;
const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
return VirtualSpaceSummary(reserved_region().start(),
reserved_region().start() + capacity_in_words,
reserved_region().start() + max_capacity_in_words);
}
void ZCollectedHeap::prepare_for_verify() {
// Does nothing
}
void ZCollectedHeap::print_on(outputStream* st) const {
_heap.print_on(st);
}
void ZCollectedHeap::print_on_error(outputStream* st) const {
CollectedHeap::print_on_error(st);
st->print_cr("Address Space");
st->print_cr( " Start: " PTR_FORMAT, ZAddressSpaceStart);
st->print_cr( " End: " PTR_FORMAT, ZAddressSpaceEnd);
st->print_cr( " Size: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
st->print_cr( "Heap");
st->print_cr( " GlobalPhase: %u", ZGlobalPhase);
st->print_cr( " GlobalSeqNum: %u", ZGlobalSeqNum);
st->print_cr( " Offset Max: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
st->print_cr( " Page Size Small: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
st->print_cr( " Page Size Medium: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
st->print_cr( "Metadata Bits");
st->print_cr( " Good: " PTR_FORMAT, ZAddressGoodMask);
st->print_cr( " Bad: " PTR_FORMAT, ZAddressBadMask);
st->print_cr( " WeakBad: " PTR_FORMAT, ZAddressWeakBadMask);
st->print_cr( " Marked: " PTR_FORMAT, ZAddressMetadataMarked);
st->print_cr( " Remapped: " PTR_FORMAT, ZAddressMetadataRemapped);
}
void ZCollectedHeap::print_extended_on(outputStream* st) const {
_heap.print_extended_on(st);
}
void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
_director->print_on(st);
st->cr();
_driver->print_on(st);
st->cr();
_stat->print_on(st);
st->cr();
_heap.print_worker_threads_on(st);
_runtime_workers.print_threads_on(st);
}
void ZCollectedHeap::print_tracing_info() const {
// Does nothing
}
void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
_heap.verify();
}
bool ZCollectedHeap::is_oop(oop object) const {
return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}

@ -0,0 +1,133 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
#define SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zRuntimeWorkers.hpp"
#include "gc/z/zStat.hpp"
class ZCollectedHeap : public CollectedHeap {
friend class VMStructs;
private:
ZCollectorPolicy* _collector_policy;
SoftRefPolicy _soft_ref_policy;
ZBarrierSet _barrier_set;
ZInitialize _initialize;
ZHeap _heap;
ZDirector* _director;
ZDriver* _driver;
ZStat* _stat;
ZRuntimeWorkers _runtime_workers;
virtual HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size);
public:
static ZCollectedHeap* heap();
using CollectedHeap::ensure_parsability;
using CollectedHeap::accumulate_statistics_all_tlabs;
using CollectedHeap::resize_all_tlabs;
ZCollectedHeap(ZCollectorPolicy* policy);
virtual Name kind() const;
virtual const char* name() const;
virtual jint initialize();
virtual void initialize_serviceability();
virtual void stop();
virtual CollectorPolicy* collector_policy() const;
virtual SoftRefPolicy* soft_ref_policy();
virtual size_t max_capacity() const;
virtual size_t capacity() const;
virtual size_t used() const;
virtual bool is_maximal_no_gc() const;
virtual bool is_scavengable(oop obj);
virtual bool is_in(const void* p) const;
virtual bool is_in_closed_subset(const void* p) const;
virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype);
virtual void collect(GCCause::Cause cause);
virtual void collect_as_vm_thread(GCCause::Cause cause);
virtual void do_full_collection(bool clear_all_soft_refs);
virtual bool supports_tlab_allocation() const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t tlab_used(Thread* thr) const;
virtual size_t max_tlab_size() const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
virtual bool can_elide_tlab_store_barriers() const;
virtual bool can_elide_initializing_store_barrier(oop new_obj);
virtual bool card_mark_must_follow_store() const;
virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();
virtual void object_iterate(ObjectClosure* cl);
virtual void safe_object_iterate(ObjectClosure* cl);
virtual HeapWord* block_start(const void* addr) const;
virtual size_t block_size(const HeapWord* addr) const;
virtual bool block_is_obj(const HeapWord* addr) const;
virtual void register_nmethod(nmethod* nm);
virtual void unregister_nmethod(nmethod* nm);
virtual void verify_nmethod(nmethod* nmethod);
virtual WorkGang* get_safepoint_workers();
virtual jlong millis_since_last_gc();
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual VirtualSpaceSummary create_heap_space_summary();
virtual void print_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
virtual void print_tracing_info() const;
virtual void prepare_for_verify();
virtual void verify(VerifyOption option /* ignored */);
virtual bool is_oop(oop object) const;
};
#endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP

@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zCollectorPolicy.hpp"
#include "gc/z/zGlobals.hpp"
void ZCollectorPolicy::initialize_alignments() {
_space_alignment = ZPageSizeMin;
_heap_alignment = _space_alignment;
}

@ -0,0 +1,34 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
#define SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
#include "gc/shared/collectorPolicy.hpp"
class ZCollectorPolicy : public CollectorPolicy {
public:
virtual void initialize_alignments();
};
#endif // SHARE_GC_Z_ZCOLLECTORPOLICY_HPP

@ -0,0 +1,147 @@
#
# GDB functions for debugging the Z Garbage Collector
#
printf "Loading zDebug.gdb\n"
# Print Klass*
define zpk
printf "Klass: %s\n", (char*)((Klass*)($arg0))->_name->_body
end
# Print oop
define zpo
set $obj = (oopDesc*)($arg0)
printf "Oop: 0x%016llx\tState: ", (uintptr_t)$obj
if ((uintptr_t)$obj & (uintptr_t)ZAddressGoodMask)
printf "Good "
if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
printf "(Remapped)"
else
if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
printf "(Marked)"
else
printf "(Unknown)"
end
end
else
printf "Bad "
if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataMarked)
# Should be marked
if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
printf "(Not Marked, Remapped)"
else
printf "(Not Marked, Not Remapped)"
end
else
if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataRemapped)
# Should be remapped
if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
printf "(Marked, Not Remapped)"
else
printf "(Not Marked, Not Remapped)"
end
else
# Unknown
printf "(Unknown)"
end
end
end
printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZPageSizeMinShift
x/16gx $obj
printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$obj->_metadata->_klass->_name->_body
end
# Print heap page by pagetable index
define zpp
set $page = (ZPage*)((uintptr_t)ZHeap::_heap._pagetable._map._map[($arg0)] & ~1)
printf "Page %p\n", $page
print *$page
end
# Print pagetable
define zpt
printf "Pagetable (first 128 slots)\n"
x/128gx ZHeap::_heap._pagetable._map._map
end
# Print live map
define __zmarked
set $livemap = $arg0
set $bit = $arg1
set $size = $livemap._bitmap._size
set $segment_size = $size / ZLiveMap::nsegments
set $segment = $bit / $segment_size
set $segment_bit = 1 << $segment
printf "Segment is "
if !($livemap._segment_live_bits & $segment_bit)
printf "NOT "
end
printf "live (segment %d)\n", $segment
if $bit >= $size
print "Error: Bit %z out of bounds (bitmap size %z)\n", $bit, $size
else
set $word_index = $bit / 64
set $bit_index = $bit % 64
set $word = $livemap._bitmap._map[$word_index]
set $live_bit = $word & (1 << $bit_index)
printf "Object is "
if $live_bit == 0
printf "NOT "
end
printf "live (word index %d, bit index %d)\n", $word_index, $bit_index
end
end
define zmarked
set $addr = $arg0
set $obj = ((uintptr_t)$addr & ZAddressOffsetMask)
set $page_index = $obj >> ZPageSizeMinShift
set $page_entry = (uintptr_t)ZHeap::_heap._pagetable._map._map[$page_index]
set $page = (ZPage*)($page_entry & ~1)
set $page_start = (uintptr_t)$page._virtual._start
set $page_end = (uintptr_t)$page._virtual._end
set $page_seqnum = $page._livemap._seqnum
set $global_seqnum = ZGlobalSeqNum
if $obj < $page_start || $obj >= $page_end
printf "Error: %p not in page %p (start %p, end %p)\n", $obj, $page, $page_start, $page_end
else
printf "Page is "
if $page_seqnum != $global_seqnum
printf "NOT "
end
printf "live (page %p, page seqnum %d, global seqnum %d)\n", $page, $page_seqnum, $global_seqnum
#if $page_seqnum == $global_seqnum
set $offset = $obj - $page_start
set $bit = $offset / 8
__zmarked $page._livemap $bit
#end
end
end
# Print heap information
define zph
printf "Address Space\n"
printf " Start: 0x%llx\n", ZAddressSpaceStart
printf " End: 0x%llx\n", ZAddressSpaceEnd
printf " Size: %-15llu (0x%llx)\n", ZAddressSpaceSize, ZAddressSpaceSize
printf "Heap\n"
printf " GlobalPhase: %u\n", ZGlobalPhase
printf " GlobalSeqNum: %u\n", ZGlobalSeqNum
printf " Offset Max: %-15llu (0x%llx)\n", ZAddressOffsetMax, ZAddressOffsetMax
printf " Page Size Small: %-15llu (0x%llx)\n", ZPageSizeSmall, ZPageSizeSmall
printf " Page Size Medium: %-15llu (0x%llx)\n", ZPageSizeMedium, ZPageSizeMedium
printf "Metadata Bits\n"
printf " Good: 0x%016llx\n", ZAddressGoodMask
printf " Bad: 0x%016llx\n", ZAddressBadMask
printf " WeakBad: 0x%016llx\n", ZAddressWeakBadMask
printf " Marked: 0x%016llx\n", ZAddressMetadataMarked
printf " Remapped: 0x%016llx\n", ZAddressMetadataRemapped
end
# End of file

@ -0,0 +1,222 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.hpp"
#include "logging/log.hpp"
const double ZDirector::one_in_1000 = 3.290527;
ZDirector::ZDirector() :
_metronome(ZStatAllocRate::sample_hz) {
set_name("ZDirector");
create_and_start();
}
void ZDirector::sample_allocation_rate() const {
// Sample allocation rate. This is needed by rule_allocation_rate()
// below to estimate the time we have until we run out of memory.
const double bytes_per_second = ZStatAllocRate::sample_and_reset();
log_debug(gc, alloc)("Allocation Rate: %.3fMB/s, Avg: %.3f(+/-%.3f)MB/s",
bytes_per_second / M,
ZStatAllocRate::avg() / M,
ZStatAllocRate::avg_sd() / M);
}
bool ZDirector::is_first() const {
return ZStatCycle::ncycles() == 0;
}
bool ZDirector::is_warm() const {
return ZStatCycle::ncycles() >= 3;
}
bool ZDirector::rule_timer() const {
if (ZCollectionInterval == 0) {
// Rule disabled
return false;
}
// Perform GC if timer has expired.
const double time_since_last_gc = ZStatCycle::time_since_last();
const double time_until_gc = ZCollectionInterval - time_since_last_gc;
log_debug(gc, director)("Rule: Timer, Interval: %us, TimeUntilGC: %.3lfs",
ZCollectionInterval, time_until_gc);
return time_until_gc <= 0;
}
bool ZDirector::rule_warmup() const {
if (is_warm()) {
// Rule disabled
return false;
}
// Perform GC if heap usage passes 10/20/30% and no other GC has been
// performed yet. This allows us to get some early samples of the GC
// duration, which is needed by the other rules.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t used = ZHeap::heap()->used();
const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1;
const size_t used_threshold = max_capacity * used_threshold_percent;
log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
used_threshold_percent * 100, used / M, used_threshold / M);
return used >= used_threshold;
}
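// Concretely: the first cycle is triggered at 10% heap usage, the second at
// 20% and the third at 30%; after three completed cycles is_warm() returns
// true and this rule is permanently disabled.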
bool ZDirector::rule_allocation_rate() const {
if (is_first()) {
// Rule disabled
return false;
}
// Perform GC if the estimated max allocation rate indicates that we
// will run out of memory. The estimated max allocation rate is based
// on the moving average of the sampled allocation rate plus a safety
// margin based on variations in the allocation rate and unforeseen
// allocation spikes.
// Calculate amount of free memory available to Java threads. Note that
// the heap reserve is not available to Java threads and is therefore not
// considered part of the free memory.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t max_reserve = ZHeap::heap()->max_reserve();
const size_t used = ZHeap::heap()->used();
const size_t free_with_reserve = max_capacity - used;
const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
// Calculate time until OOM given the max allocation rate and the amount
// of free memory. The allocation rate is a moving average and we multiply
// that with an allocation spike tolerance factor to guard against unforeseen
// phase changes in the allocation rate. We then add ~3.3 sigma to account for
// the allocation rate variance, which means the probability is 1 in 1000
// that a sample is outside of the confidence interval.
const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
// Calculate max duration of a GC cycle. The duration of GC is a moving
// average, we add ~3.3 sigma to account for the GC duration variance.
const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
// Calculate time until GC given the time until OOM and max duration of GC.
// We also deduct the sample interval, so that we don't overshoot the target
// time and end up starting the GC too late in the next interval.
const double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
const double time_until_gc = time_until_oom - max_duration_of_gc - sample_interval;
log_debug(gc, director)("Rule: Allocation Rate, MaxAllocRate: %.3lfMB/s, Free: " SIZE_FORMAT "MB, MaxDurationOfGC: %.3lfs, TimeUntilGC: %.3lfs",
max_alloc_rate / M, free / M, max_duration_of_gc, time_until_gc);
return time_until_gc <= 0;
}
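Plugging hypothetical numbers into the arithmetic above makes the rule easier to follow (an assumed sketch, not measured data or HotSpot code; the 10 Hz sample rate is also an assumption):

#include <cstdio>

int main() {
  const double avg = 500.0, sd = 50.0;    // MB/s, moving average and std dev
  const double spike_tolerance = 2.0;     // stands in for ZAllocationSpikeTolerance
  const double one_in_1000 = 3.290527;    // ~3.3 sigma
  const double free_mb = 8192.0;

  const double max_alloc_rate  = (avg * spike_tolerance) + (sd * one_in_1000);  // 1164.5 MB/s
  const double time_until_oom  = free_mb / (max_alloc_rate + 1.0);              // ~7.0 s
  const double max_gc_duration = 1.2 + 0.1 * one_in_1000;  // davg + dsd * ~3.3 sigma
  const double sample_interval = 1.0 / 10.0;               // assumed 10 Hz sampling
  const double time_until_gc   = time_until_oom - max_gc_duration - sample_interval;

  printf("max_alloc_rate=%.1fMB/s time_until_oom=%.2fs time_until_gc=%.2fs\n",
         max_alloc_rate, time_until_oom, time_until_gc);  // GC starts once <= 0
  return 0;
}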
bool ZDirector::rule_proactive() const {
if (!ZProactive || !is_warm()) {
// Rule disabled
return false;
}
// Perform GC if the impact of doing so, in terms of application throughput
// reduction, is considered acceptable. This rule allows us to keep the heap
// size down and allow reference processing to happen even when we have a lot
// of free space on the heap.
// Only consider doing a proactive GC if the heap usage has grown by at least
// 10% of the max capacity since the previous GC, or more than 5 minutes has
// passed since the previous GC. This helps avoid superfluous GCs when running
// applications with very low allocation rate.
const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10%
const size_t used_threshold = used_after_last_gc + used_increase_threshold;
const size_t used = ZHeap::heap()->used();
const double time_since_last_gc = ZStatCycle::time_since_last();
const double time_since_last_gc_threshold = 5 * 60; // 5 minutes
if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) {
// Don't even consider doing a proactive GC
log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3lfs",
(used_threshold - used) / M,
time_since_last_gc_threshold - time_since_last_gc);
return false;
}
const double assumed_throughput_drop_during_gc = 0.50; // 50%
const double acceptable_throughput_drop = 0.01; // 1%
const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
const double acceptable_gc_interval = max_duration_of_gc * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
const double time_until_gc = acceptable_gc_interval - time_since_last_gc;
log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3lfs, TimeSinceLastGC: %.3lfs, TimeUntilGC: %.3lfs",
acceptable_gc_interval, time_since_last_gc, time_until_gc);
return time_until_gc <= 0;
}
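// Worked through with the constants above: acceptable_gc_interval =
// max_duration_of_gc * (0.50 / 0.01 - 1.0) = 49 * max_duration_of_gc, so a
// worst-case 2 s cycle permits a proactive GC roughly every 98 s, keeping the
// estimated throughput drop at about 1%.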
GCCause::Cause ZDirector::make_gc_decision() const {
// Rule 0: Timer
if (rule_timer()) {
return GCCause::_z_timer;
}
// Rule 1: Warmup
if (rule_warmup()) {
return GCCause::_z_warmup;
}
// Rule 2: Allocation rate
if (rule_allocation_rate()) {
return GCCause::_z_allocation_rate;
}
// Rule 3: Proactive
if (rule_proactive()) {
return GCCause::_z_proactive;
}
// No GC
return GCCause::_no_gc;
}
void ZDirector::run_service() {
// Main loop
while (_metronome.wait_for_tick()) {
sample_allocation_rate();
const GCCause::Cause cause = make_gc_decision();
if (cause != GCCause::_no_gc) {
ZCollectedHeap::heap()->collect(cause);
}
}
}
void ZDirector::stop_service() {
_metronome.stop();
}

@ -0,0 +1,56 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZDIRECTOR_HPP
#define SHARE_GC_Z_ZDIRECTOR_HPP
#include "gc/shared/concurrentGCThread.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/z/zMetronome.hpp"
class ZDirector : public ConcurrentGCThread {
private:
static const double one_in_1000;
ZMetronome _metronome;
void sample_allocation_rate() const;
bool is_first() const;
bool is_warm() const;
bool rule_timer() const;
bool rule_warmup() const;
bool rule_allocation_rate() const;
bool rule_proactive() const;
GCCause::Cause make_gc_decision() const;
protected:
virtual void run_service();
virtual void stop_service();
public:
ZDirector();
};
#endif // SHARE_GC_Z_ZDIRECTOR_HPP

@ -0,0 +1,403 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zMessagePort.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "logging/log.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmThread.hpp"
static const ZStatPhaseCycle ZPhaseCycle("Garbage Collection Cycle");
static const ZStatPhasePause ZPhasePauseMarkStart("Pause Mark Start");
static const ZStatPhaseConcurrent ZPhaseConcurrentMark("Concurrent Mark");
static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinue("Concurrent Mark Continue");
static const ZStatPhasePause ZPhasePauseMarkEnd("Pause Mark End");
static const ZStatPhaseConcurrent ZPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References");
static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set");
static const ZStatPhaseConcurrent ZPhaseConcurrentDestroyDetachedPages("Concurrent Destroy Detached Pages");
static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set");
static const ZStatPhaseConcurrent ZPhaseConcurrentPrepareRelocationSet("Concurrent Prepare Relocation Set");
static const ZStatPhasePause ZPhasePauseRelocateStart("Pause Relocate Start");
static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate");
static const ZStatCriticalPhase ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */);
static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads);
class ZOperationClosure : public StackObj {
public:
virtual const char* name() const = 0;
virtual bool needs_inactive_gc_locker() const {
// An inactive GC locker is needed in operations where we change the good
// mask or move objects. Changing the good mask will invalidate all oops,
// which makes it conceptually the same thing as moving all objects.
return false;
}
virtual bool do_operation() = 0;
};
class VM_ZOperation : public VM_Operation {
private:
ZOperationClosure* _cl;
uint _gc_id;
bool _gc_locked;
bool _success;
public:
VM_ZOperation(ZOperationClosure* cl) :
_cl(cl),
_gc_id(GCId::current()),
_gc_locked(false),
_success(false) {}
virtual VMOp_Type type() const {
return VMOp_ZOperation;
}
virtual const char* name() const {
return _cl->name();
}
virtual bool doit_prologue() {
Heap_lock->lock();
return true;
}
virtual void doit() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
// JVMTI support
SvcGCMarker sgcm(SvcGCMarker::OTHER);
// Setup GC id
GCIdMark gcid(_gc_id);
if (_cl->needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) {
// GC locker is active, bail out
_gc_locked = true;
} else {
// Execute operation
IsGCActiveMark mark;
_success = _cl->do_operation();
}
}
virtual void doit_epilogue() {
Heap_lock->unlock();
}
bool gc_locked() {
return _gc_locked;
}
bool success() const {
return _success;
}
};
class ZMarkStartClosure : public ZOperationClosure {
public:
virtual const char* name() const {
return "ZMarkStart";
}
virtual bool needs_inactive_gc_locker() const {
return true;
}
virtual bool do_operation() {
ZStatTimer timer(ZPhasePauseMarkStart);
ZServiceabilityMarkStartTracer tracer;
ZCollectedHeap::heap()->increment_total_collections(true /* full */);
ZHeap::heap()->mark_start();
return true;
}
};
class ZMarkEndClosure : public ZOperationClosure {
public:
virtual const char* name() const {
return "ZMarkEnd";
}
virtual bool do_operation() {
ZStatTimer timer(ZPhasePauseMarkEnd);
ZServiceabilityMarkEndTracer tracer;
return ZHeap::heap()->mark_end();
}
};
class ZRelocateStartClosure : public ZOperationClosure {
public:
virtual const char* name() const {
return "ZRelocateStart";
}
virtual bool needs_inactive_gc_locker() const {
return true;
}
virtual bool do_operation() {
ZStatTimer timer(ZPhasePauseRelocateStart);
ZServiceabilityRelocateStartTracer tracer;
ZHeap::heap()->relocate_start();
return true;
}
};
ZDriver::ZDriver() :
_gc_cycle_port(),
_gc_locker_port() {
set_name("ZDriver");
create_and_start();
}
bool ZDriver::vm_operation(ZOperationClosure* cl) {
for (;;) {
VM_ZOperation op(cl);
VMThread::execute(&op);
if (op.gc_locked()) {
// Wait for GC to become unlocked and restart the VM operation
ZStatTimer timer(ZCriticalPhaseGCLockerStall);
_gc_locker_port.wait();
continue;
}
// Notify VM operation completed
_gc_locker_port.ack();
return op.success();
}
}
void ZDriver::collect(GCCause::Cause cause) {
switch (cause) {
case GCCause::_wb_young_gc:
case GCCause::_wb_conc_mark:
case GCCause::_wb_full_gc:
case GCCause::_dcmd_gc_run:
case GCCause::_java_lang_system_gc:
case GCCause::_full_gc_alot:
case GCCause::_scavenge_alot:
case GCCause::_jvmti_force_gc:
case GCCause::_metadata_GC_clear_soft_refs:
// Start synchronous GC
_gc_cycle_port.send_sync(cause);
break;
case GCCause::_z_timer:
case GCCause::_z_warmup:
case GCCause::_z_allocation_rate:
case GCCause::_z_allocation_stall:
case GCCause::_z_proactive:
case GCCause::_metadata_GC_threshold:
// Start asynchronous GC
_gc_cycle_port.send_async(cause);
break;
case GCCause::_gc_locker:
// Restart VM operation previously blocked by the GC locker
_gc_locker_port.signal();
break;
default:
// Other causes not supported
fatal("Unsupported GC cause (%s)", GCCause::to_string(cause));
break;
}
}
GCCause::Cause ZDriver::start_gc_cycle() {
// Wait for GC request
return _gc_cycle_port.receive();
}
class ZSoftReferencePolicyScope : public StackObj {
private:
bool should_clear_soft_reference(GCCause::Cause cause) const {
const bool clear = ZCollectedHeap::heap()->soft_ref_policy()->should_clear_all_soft_refs();
// Clear all soft reference if the policy says so, or if
// the GC cause indicates that we're running low on memory.
return clear ||
cause == GCCause::_z_allocation_stall ||
cause == GCCause::_metadata_GC_clear_soft_refs;
}
void clear_should_clear_soft_reference() const {
ZCollectedHeap::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
}
public:
ZSoftReferencePolicyScope(GCCause::Cause cause) {
const bool clear = should_clear_soft_reference(cause);
ZHeap::heap()->set_soft_reference_policy(clear);
clear_should_clear_soft_reference();
}
~ZSoftReferencePolicyScope() {
Universe::update_heap_info_at_gc();
}
};
class ZDriverCycleScope : public StackObj {
private:
GCIdMark _gc_id;
GCCauseSetter _gc_cause_setter;
ZSoftReferencePolicyScope _soft_ref_policy;
ZStatTimer _timer;
bool should_boost_worker_threads(GCCause::Cause cause) const {
return cause == GCCause::_java_lang_system_gc ||
cause == GCCause::_z_allocation_stall;
}
public:
ZDriverCycleScope(GCCause::Cause cause) :
_gc_id(),
_gc_cause_setter(ZCollectedHeap::heap(), cause),
_soft_ref_policy(cause),
_timer(ZPhaseCycle) {
// Update statistics
ZStatCycle::at_start();
// Set boost mode
const bool boost = should_boost_worker_threads(cause);
ZHeap::heap()->set_boost_worker_threads(boost);
}
~ZDriverCycleScope() {
// Calculate boost factor
const double boost_factor = (double)ZHeap::heap()->nconcurrent_worker_threads() /
(double)ZHeap::heap()->nconcurrent_no_boost_worker_threads();
// Update statistics
ZStatCycle::at_end(boost_factor);
}
};
void ZDriver::run_gc_cycle(GCCause::Cause cause) {
ZDriverCycleScope scope(cause);
// Phase 1: Pause Mark Start
{
ZMarkStartClosure cl;
vm_operation(&cl);
}
// Phase 2: Concurrent Mark
{
ZStatTimer timer(ZPhaseConcurrentMark);
ZHeap::heap()->mark();
}
// Phase 3: Pause Mark End
{
ZMarkEndClosure cl;
while (!vm_operation(&cl)) {
// Phase 3.5: Concurrent Mark Continue
ZStatTimer timer(ZPhaseConcurrentMarkContinue);
ZHeap::heap()->mark();
}
}
// Phase 4: Concurrent Process Non-Strong References
{
ZStatTimer timer(ZPhaseConcurrentProcessNonStrongReferences);
ZHeap::heap()->process_non_strong_references();
}
// Phase 5: Concurrent Reset Relocation Set
{
ZStatTimer timer(ZPhaseConcurrentResetRelocationSet);
ZHeap::heap()->reset_relocation_set();
}
// Phase 6: Concurrent Destroy Detached Pages
{
ZStatTimer timer(ZPhaseConcurrentDestroyDetachedPages);
ZHeap::heap()->destroy_detached_pages();
}
// Phase 7: Concurrent Select Relocation Set
{
ZStatTimer timer(ZPhaseConcurrentSelectRelocationSet);
ZHeap::heap()->select_relocation_set();
}
// Phase 8: Prepare Relocation Set
{
ZStatTimer timer(ZPhaseConcurrentPrepareRelocationSet);
ZHeap::heap()->prepare_relocation_set();
}
// Phase 9: Pause Relocate Start
{
ZRelocateStartClosure cl;
vm_operation(&cl);
}
// Phase 10: Concurrent Relocate
{
ZStatTimer timer(ZPhaseConcurrentRelocated);
ZHeap::heap()->relocate();
}
}
void ZDriver::end_gc_cycle() {
// Notify GC cycle completed
_gc_cycle_port.ack();
// Check for out of memory condition
ZHeap::heap()->check_out_of_memory();
}
void ZDriver::run_service() {
// Main loop
while (!should_terminate()) {
const GCCause::Cause cause = start_gc_cycle();
if (cause != GCCause::_no_gc) {
run_gc_cycle(cause);
end_gc_cycle();
}
}
}
void ZDriver::stop_service() {
_gc_cycle_port.send_async(GCCause::_no_gc);
}
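// A minimal usage sketch (hypothetical callers, not part of this commit):
// explicit GC requests block until the cycle completes (the send_sync path
// above), while heuristic causes from the director return immediately
// (the send_async path).
static void explicit_gc(ZDriver* driver) {
  driver->collect(GCCause::_java_lang_system_gc); // Synchronous, waits for ack
}
static void heuristic_gc(ZDriver* driver) {
  driver->collect(GCCause::_z_timer);             // Asynchronous, fire and forget
}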

@ -0,0 +1,54 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZDRIVER_HPP
#define SHARE_GC_Z_ZDRIVER_HPP
#include "gc/shared/concurrentGCThread.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/z/zMessagePort.hpp"
class ZOperationClosure;
class ZDriver : public ConcurrentGCThread {
private:
ZMessagePort<GCCause::Cause> _gc_cycle_port;
ZRendezvousPort _gc_locker_port;
bool vm_operation(ZOperationClosure* cl);
GCCause::Cause start_gc_cycle();
void run_gc_cycle(GCCause::Cause cause);
void end_gc_cycle();
protected:
virtual void run_service();
virtual void stop_service();
public:
ZDriver();
void collect(GCCause::Cause cause);
};
#endif // SHARE_GC_Z_ZDRIVER_HPP

@ -0,0 +1,50 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zErrno.hpp"
#include <errno.h>
#include <string.h>
ZErrno::ZErrno() :
_error(errno) {}
ZErrno::ZErrno(int error) :
_error(error) {}
ZErrno::operator bool() const {
return _error != 0;
}
bool ZErrno::operator==(int error) const {
return _error == error;
}
bool ZErrno::operator!=(int error) const {
return _error != error;
}
const char* ZErrno::to_string() const {
return strerror(_error);
}

@ -0,0 +1,43 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZERRNO_HPP
#define SHARE_GC_Z_ZERRNO_HPP
#include "memory/allocation.hpp"
class ZErrno : public StackObj {
private:
const int _error;
public:
ZErrno();
ZErrno(int error);
operator bool() const;
bool operator==(int error) const;
bool operator!=(int error) const;
const char* to_string() const;
};
#endif // SHARE_GC_Z_ZERRNO_HPP
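// A minimal usage sketch (hypothetical caller, not part of this commit):
// ZErrno snapshots errno at construction, so it should be created right
// after the failing system call, before anything else can clobber errno.
#include <sys/mman.h>
static bool commit_memory(void* addr, size_t size) {
  if (mprotect(addr, size, PROT_READ | PROT_WRITE) == -1) {
    const ZErrno err;
    log_error(gc)("Failed to commit memory (%s)", err.to_string());
    return false;
  }
  return true;
}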

@ -0,0 +1,77 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zForwardingTable.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
void ZForwardingTable::setup(size_t live_objects) {
assert(is_null(), "Should be empty");
assert(live_objects > 0, "Invalid size");
// Allocate table for linear probing. The size of the table must be
// a power of two to allow for quick and inexpensive indexing/masking.
// The table is sized to have a load factor of 50%, i.e. sized to have
// double the number of entries actually inserted.
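// For example, 1000 live objects gives round_up_power_of_2(2000) == 2048
// slots (a load factor just under 50%), and the power-of-two size lets
// "hash & (size - 1)" stand in for a modulo when probing.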
_size = ZUtils::round_up_power_of_2(live_objects * 2);
_table = MallocArrayAllocator<ZForwardingTableEntry>::allocate(_size, mtGC);
// Clear table
memset(_table, ZForwardingTableEntry::empty(), _size * sizeof(ZForwardingTableEntry));
}
void ZForwardingTable::reset() {
// Free table
MallocArrayAllocator<ZForwardingTableEntry>::free(_table);
_table = NULL;
_size = 0;
}
void ZForwardingTable::verify(size_t object_max_count, size_t live_objects) const {
size_t count = 0;
for (size_t i = 0; i < _size; i++) {
const ZForwardingTableEntry entry = _table[i];
if (entry.is_empty()) {
// Skip empty entries
continue;
}
// Check from index
guarantee(entry.from_index() < object_max_count, "Invalid from index");
// Check for duplicates
for (size_t j = i + 1; j < _size; j++) {
const ZForwardingTableEntry other = _table[j];
guarantee(entry.from_index() != other.from_index(), "Duplicate from");
guarantee(entry.to_offset() != other.to_offset(), "Duplicate to");
}
count++;
}
// Check number of non-null entries
guarantee(live_objects == count, "Count mismatch");
}

@ -0,0 +1,59 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZFORWARDINGTABLE_HPP
#define SHARE_GC_Z_ZFORWARDINGTABLE_HPP
#include "gc/z/zForwardingTableEntry.hpp"
#include "memory/allocation.hpp"
typedef size_t ZForwardingTableCursor;
class ZForwardingTable {
friend class VMStructs;
friend class ZForwardingTableTest;
private:
ZForwardingTableEntry* _table;
size_t _size;
ZForwardingTableEntry at(ZForwardingTableCursor* cursor) const;
ZForwardingTableEntry first(uintptr_t from_index, ZForwardingTableCursor* cursor) const;
ZForwardingTableEntry next(ZForwardingTableCursor* cursor) const;
public:
ZForwardingTable();
~ZForwardingTable();
bool is_null() const;
void setup(size_t live_objects);
void reset();
ZForwardingTableEntry find(uintptr_t from_index) const;
ZForwardingTableEntry find(uintptr_t from_index, ZForwardingTableCursor* cursor) const;
uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingTableCursor* cursor);
void verify(size_t object_max_count, size_t live_objects) const;
};
#endif // SHARE_GC_Z_ZFORWARDINGTABLE_HPP

@ -0,0 +1,109 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
#define SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
#include "gc/z/zForwardingTable.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHash.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"
inline ZForwardingTable::ZForwardingTable() :
_table(NULL),
_size(0) {}
inline ZForwardingTable::~ZForwardingTable() {
assert(is_null(), "Should be empty");
}
inline ZForwardingTableEntry ZForwardingTable::at(ZForwardingTableCursor* cursor) const {
return _table[*cursor];
}
inline ZForwardingTableEntry ZForwardingTable::first(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
const size_t mask = _size - 1;
const size_t hash = ZHash::uint32_to_uint32((uint32_t)from_index);
*cursor = hash & mask;
return at(cursor);
}
inline ZForwardingTableEntry ZForwardingTable::next(ZForwardingTableCursor* cursor) const {
const size_t mask = _size - 1;
*cursor = (*cursor + 1) & mask;
return at(cursor);
}
inline bool ZForwardingTable::is_null() const {
return _table == NULL;
}
inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index) const {
ZForwardingTableCursor dummy;
return find(from_index, &dummy);
}
inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
// Reading entries in the table races with the atomic CAS done for
// insertion into the table. This is safe because each entry is at
// most updated once (from -1 to something else).
ZForwardingTableEntry entry = first(from_index, cursor);
while (!entry.is_empty()) {
if (entry.from_index() == from_index) {
// Match found, return matching entry
return entry;
}
entry = next(cursor);
}
// Match not found, return empty entry
return entry;
}
inline uintptr_t ZForwardingTable::insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingTableCursor* cursor) {
const ZForwardingTableEntry new_entry(from_index, to_offset);
const ZForwardingTableEntry old_entry; // empty
for (;;) {
const ZForwardingTableEntry prev_entry = Atomic::cmpxchg(new_entry, _table + *cursor, old_entry);
if (prev_entry.is_empty()) {
// Success
return to_offset;
}
// Find next empty or matching entry
ZForwardingTableEntry entry = at(cursor);
while (!entry.is_empty()) {
if (entry.from_index() == from_index) {
// Match found, return already inserted address
return entry.to_offset();
}
entry = next(cursor);
}
}
}
#endif // SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
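// A minimal usage sketch (hypothetical relocation code, not part of this
// commit): find() leaves the cursor at the first matching or empty slot,
// and insert() CASes at that cursor, so racing relocators always agree on
// a single to_offset for a given from_index.
static uintptr_t forward_or_adopt(ZForwardingTable* table, uintptr_t from_index, uintptr_t new_offset) {
  ZForwardingTableCursor cursor;
  const ZForwardingTableEntry entry = table->find(from_index, &cursor);
  if (!entry.is_empty()) {
    // Another thread already forwarded this object, adopt its copy
    return entry.to_offset();
  }
  // Try to claim the slot, returns the winning offset either way
  return table->insert(from_index, new_offset, &cursor);
}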

@ -0,0 +1,97 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP
#define SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP
#include "gc/z/zBitField.hpp"
#include "memory/allocation.hpp"
#include "metaprogramming/primitiveConversions.hpp"
//
// Forwarding table entry layout
// -----------------------------
//
// 6 4 4 0
// 3 2 1 0
// +------------------------+-----------------------------------------------+
// |11111111 11111111 111111|11 11111111 11111111 11111111 11111111 11111111|
// +------------------------+-----------------------------------------------+
// | |
// | * 41-0 To Object Offset (42-bits)
// |
// * 63-42 From Object Index (22-bits)
//
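// Worked example (illustrative): from_index 5 with to_offset 0x1000
// encodes as (5 << 42) | 0x1000 == 0x0000140000001000, and decoding
// masks the same fields back out.
//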
class ZForwardingTableEntry {
friend struct PrimitiveConversions;
private:
typedef ZBitField<uint64_t, size_t, 0, 42> field_to_offset;
typedef ZBitField<uint64_t, size_t, 42, 22> field_from_index;
uint64_t _entry;
public:
ZForwardingTableEntry() :
_entry(empty()) {}
ZForwardingTableEntry(size_t from_index, size_t to_offset) :
_entry(field_from_index::encode(from_index) |
field_to_offset::encode(to_offset)) {}
static uintptr_t empty() {
return (uintptr_t)-1;
}
bool is_empty() const {
return _entry == empty();
}
size_t to_offset() const {
return field_to_offset::decode(_entry);
}
size_t from_index() const {
return field_from_index::decode(_entry);
}
};
// Needed to allow atomic operations on ZForwardingTableEntry
template <>
struct PrimitiveConversions::Translate<ZForwardingTableEntry> : public TrueType {
typedef ZForwardingTableEntry Value;
typedef uint64_t Decayed;
static Decayed decay(Value v) {
return v._entry;
}
static Value recover(Decayed d) {
ZForwardingTableEntry entry;
entry._entry = d;
return entry;
}
};
#endif // SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP

@ -0,0 +1,41 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZFUTURE_HPP
#define SHARE_GC_Z_ZFUTURE_HPP
#include "memory/allocation.hpp"
#include "runtime/semaphore.hpp"
template <typename T>
class ZFuture {
private:
Semaphore _sema;
T _value;
public:
void set(T value);
T get();
};
#endif // SHARE_GC_Z_ZFUTURE_HPP

@ -0,0 +1,55 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZFUTURE_INLINE_HPP
#define SHARE_GC_Z_ZFUTURE_INLINE_HPP
#include "gc/z/zFuture.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
template <typename T>
inline void ZFuture<T>::set(T value) {
// Set value
_value = value;
// Notify waiter
_sema.signal();
}
template <typename T>
inline T ZFuture<T>::get() {
// Wait for notification
Thread* const thread = Thread::current();
if (thread->is_Java_thread()) {
_sema.wait_with_safepoint_check((JavaThread*)thread);
} else {
_sema.wait();
}
// Return value
return _value;
}
#endif // SHARE_GC_Z_ZFUTURE_INLINE_HPP
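// A minimal usage sketch (hypothetical request/response pairing, not part
// of this commit): the requester blocks in get() -- with a safepoint check
// if it is a Java thread -- until the servicing thread calls set().
static void service(ZFuture<bool>* result) {
  // ... perform the requested work ...
  result->set(true);    // Wakes the waiting requester
}
static bool request(ZFuture<bool>* result) {
  // Hand result to the servicing thread, e.g. via a message port, then:
  return result->get(); // Blocks until service() has called set()
}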

@ -0,0 +1,37 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
uint32_t ZGlobalPhase = ZPhaseRelocate;
uint32_t ZGlobalSeqNum = 1;
const int& ZObjectAlignmentSmallShift = LogMinObjAlignmentInBytes;
const int& ZObjectAlignmentSmall = MinObjAlignmentInBytes;
uintptr_t ZAddressGoodMask;
uintptr_t ZAddressBadMask = 0;
uintptr_t ZAddressWeakBadMask;
uintptr_t ZAddressMetadataMarked;

@ -0,0 +1,155 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZGLOBALS_HPP
#define SHARE_GC_Z_ZGLOBALS_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include OS_CPU_HEADER(gc/z/zGlobals)
// Collector name
const char* const ZGCName = "The Z Garbage Collector";
// Global phase state
extern uint32_t ZGlobalPhase;
const uint32_t ZPhaseMark = 0;
const uint32_t ZPhaseMarkCompleted = 1;
const uint32_t ZPhaseRelocate = 2;
// Global sequence number
extern uint32_t ZGlobalSeqNum;
// Page types
const uint8_t ZPageTypeSmall = 0;
const uint8_t ZPageTypeMedium = 1;
const uint8_t ZPageTypeLarge = 2;
// Page size shifts
const size_t ZPageSizeSmallShift = ZPlatformPageSizeSmallShift;
const size_t ZPageSizeMediumShift = ZPageSizeSmallShift + 4;
const size_t ZPageSizeMinShift = ZPageSizeSmallShift;
// Page sizes
const size_t ZPageSizeSmall = (size_t)1 << ZPageSizeSmallShift;
const size_t ZPageSizeMedium = (size_t)1 << ZPageSizeMediumShift;
const size_t ZPageSizeMin = (size_t)1 << ZPageSizeMinShift;
// Object size limits
const size_t ZObjectSizeLimitSmall = (ZPageSizeSmall / 8); // Allow 12.5% waste
const size_t ZObjectSizeLimitMedium = (ZPageSizeMedium / 8); // Allow 12.5% waste
// Object alignment shifts
extern const int& ZObjectAlignmentSmallShift;
const int ZObjectAlignmentMediumShift = ZPageSizeMediumShift - 13; // 8192 objects per page
const int ZObjectAlignmentLargeShift = ZPageSizeSmallShift;
// Object alignments
extern const int& ZObjectAlignmentSmall;
const int ZObjectAlignmentMedium = 1 << ZObjectAlignmentMediumShift;
const int ZObjectAlignmentLarge = 1 << ZObjectAlignmentLargeShift;
// Pointer part of address
const uintptr_t ZAddressOffsetShift = 0;
const uintptr_t ZAddressOffsetBits = ZPlatformAddressOffsetBits;
const uintptr_t ZAddressOffsetMask = (((uintptr_t)1 << ZAddressOffsetBits) - 1) << ZAddressOffsetShift;
const size_t ZAddressOffsetMax = (uintptr_t)1 << ZAddressOffsetBits;
// Metadata part of address
const uintptr_t ZAddressMetadataShift = ZPlatformAddressMetadataShift;
const uintptr_t ZAddressMetadataBits = 4;
const uintptr_t ZAddressMetadataMask = (((uintptr_t)1 << ZAddressMetadataBits) - 1) << ZAddressMetadataShift;
// Metadata types
const uintptr_t ZAddressMetadataMarked0 = (uintptr_t)1 << (ZAddressMetadataShift + 0);
const uintptr_t ZAddressMetadataMarked1 = (uintptr_t)1 << (ZAddressMetadataShift + 1);
const uintptr_t ZAddressMetadataRemapped = (uintptr_t)1 << (ZAddressMetadataShift + 2);
const uintptr_t ZAddressMetadataFinalizable = (uintptr_t)1 << (ZAddressMetadataShift + 3);
// Address space start/end/size
const uintptr_t ZAddressSpaceStart = ZPlatformAddressSpaceStart;
const uintptr_t ZAddressSpaceSize = ZPlatformAddressSpaceSize;
const uintptr_t ZAddressSpaceEnd = ZAddressSpaceStart + ZAddressSpaceSize;
// Cache line size
const size_t ZCacheLineSize = ZPlatformCacheLineSize;
// Reserved start/end
uintptr_t ZAddressReservedStart();
uintptr_t ZAddressReservedEnd();
//
// Good/Bad mask states
// --------------------
//
// GoodMask BadMask WeakGoodMask WeakBadMask
// --------------------------------------------------------------
// Marked0 001 110 101 010
// Marked1 010 101 110 001
// Remapped 100 011 100 011
//
// Good/bad masks
extern uintptr_t ZAddressGoodMask;
extern uintptr_t ZAddressBadMask;
extern uintptr_t ZAddressWeakBadMask;
// Marked state
extern uintptr_t ZAddressMetadataMarked;
// Address space for mark stack allocations
const size_t ZMarkStackSpaceSizeShift = 40; // 1TB
const size_t ZMarkStackSpaceSize = (size_t)1 << ZMarkStackSpaceSizeShift;
const uintptr_t ZMarkStackSpaceStart = ZAddressSpaceEnd + ZMarkStackSpaceSize;
const uintptr_t ZMarkStackSpaceEnd = ZMarkStackSpaceStart + ZMarkStackSpaceSize;
const size_t ZMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M
// Mark stack and magazine sizes
const size_t ZMarkStackSizeShift = 11; // 2K
const size_t ZMarkStackSize = (size_t)1 << ZMarkStackSizeShift;
const size_t ZMarkStackHeaderSize = (size_t)1 << 4; // 16B
const size_t ZMarkStackSlots = (ZMarkStackSize - ZMarkStackHeaderSize) / sizeof(uintptr_t);
const size_t ZMarkStackMagazineSize = (size_t)1 << 15; // 32K
const size_t ZMarkStackMagazineSlots = (ZMarkStackMagazineSize / ZMarkStackSize) - 1;
// Mark stripe size
const size_t ZMarkStripeShift = ZPageSizeMinShift;
// Max number of mark stripes
const size_t ZMarkStripesMax = 16; // Must be a power of two
// Mark cache size
const size_t ZMarkCacheSize = 1024; // Must be a power of two
// Partial array minimum size
const size_t ZMarkPartialArrayMinSizeShift = 12; // 4K
const size_t ZMarkPartialArrayMinSize = (size_t)1 << ZMarkPartialArrayMinSizeShift;
// Max number of proactive/terminate flush attempts
const size_t ZMarkProactiveFlushMax = 10;
const size_t ZMarkTerminateFlushMax = 3;
// Try complete mark timeout
const uint64_t ZMarkCompleteTimeout = 1; // ms
#endif // SHARE_GC_Z_ZGLOBALS_HPP
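// Illustrative, standalone sketch (hypothetical values, not part of this
// commit) of the good/bad mask table above: the metadata bits are one-hot,
// so a load barrier reduces to a single test against the current bad mask.
// The shift of 42 assumes the x86_64 value of ZPlatformAddressMetadataShift.
#include <cassert>
#include <cstdint>
int main() {
  const uint64_t marked0  = UINT64_C(1) << 42;
  const uint64_t marked1  = UINT64_C(1) << 43;
  const uint64_t remapped = UINT64_C(1) << 44;
  // After a flip to Marked0: good mask 001, bad mask 110 (see table above)
  const uint64_t good_mask = marked0;
  const uint64_t bad_mask  = marked1 | remapped;
  const uint64_t stale_oop = remapped | 0x1000;  // Colored in an earlier phase
  const uint64_t good_oop  = good_mask | 0x1000; // Freshly colored
  assert((stale_oop & bad_mask) != 0);           // Takes the barrier slow path
  assert((good_oop & bad_mask) == 0);            // Passes the barrier untouched
  return 0;
}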

@ -0,0 +1,36 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZHASH_HPP
#define SHARE_GC_Z_ZHASH_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class ZHash : public AllStatic {
public:
static uint32_t uint32_to_uint32(uint32_t key);
static uint32_t address_to_uint32(uintptr_t key);
};
#endif // SHARE_GC_Z_ZHASH_HPP

@ -0,0 +1,43 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZHASH_INLINE_HPP
#define SHARE_GC_Z_ZHASH_INLINE_HPP
#include "gc/z/zHash.hpp"
inline uint32_t ZHash::uint32_to_uint32(uint32_t key) {
key = ~key + (key << 15);
key = key ^ (key >> 12);
key = key + (key << 2);
key = key ^ (key >> 4);
key = key * 2057;
key = key ^ (key >> 16);
return key;
}
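// Objects are at least 8-byte aligned, so the low three bits of an address
// carry no information and are shifted out before mixing.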
inline uint32_t ZHash::address_to_uint32(uintptr_t key) {
return uint32_to_uint32((uint32_t)(key >> 3));
}
#endif // SHARE_GC_Z_ZHASH_INLINE_HPP

@ -0,0 +1,583 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
ZHeap* ZHeap::_heap = NULL;
ZHeap::ZHeap() :
_workers(),
_object_allocator(_workers.nworkers()),
_page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
_pagetable(),
_mark(&_workers, &_pagetable),
_reference_processor(&_workers),
_weak_roots_processor(&_workers),
_relocate(&_workers),
_relocation_set(),
_serviceability(heap_min_size(), heap_max_size()) {
// Install global heap instance
assert(_heap == NULL, "Already initialized");
_heap = this;
// Update statistics
ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}
size_t ZHeap::heap_min_size() const {
const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
return MIN2(aligned_min_size, heap_max_size());
}
size_t ZHeap::heap_max_size() const {
const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
return MIN2(aligned_max_size, ZAddressOffsetMax);
}
size_t ZHeap::heap_max_reserve_size() const {
// Reserve one small page per worker plus one shared medium page. This is still just
// an estimate and doesn't guarantee that we can't run out of memory during relocation.
const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
return MIN2(max_reserve_size, heap_max_size());
}
bool ZHeap::is_initialized() const {
return _page_allocator.is_initialized();
}
size_t ZHeap::min_capacity() const {
return heap_min_size();
}
size_t ZHeap::max_capacity() const {
return _page_allocator.max_capacity();
}
size_t ZHeap::capacity() const {
return _page_allocator.capacity();
}
size_t ZHeap::max_reserve() const {
return _page_allocator.max_reserve();
}
size_t ZHeap::used_high() const {
return _page_allocator.used_high();
}
size_t ZHeap::used_low() const {
return _page_allocator.used_low();
}
size_t ZHeap::used() const {
return _page_allocator.used();
}
size_t ZHeap::allocated() const {
return _page_allocator.allocated();
}
size_t ZHeap::reclaimed() const {
return _page_allocator.reclaimed();
}
size_t ZHeap::tlab_capacity() const {
return capacity();
}
size_t ZHeap::tlab_used() const {
return _object_allocator.used();
}
size_t ZHeap::max_tlab_size() const {
return ZObjectSizeLimitSmall;
}
size_t ZHeap::unsafe_max_tlab_alloc() const {
size_t size = _object_allocator.remaining();
if (size < MinTLABSize) {
// The remaining space in the allocator is not enough to
// fit the smallest possible TLAB. This means that the next
// TLAB allocation will force the allocator to get a new
// backing page anyway, which in turn means that we can then
// fit the largest possible TLAB.
size = max_tlab_size();
}
return MIN2(size, max_tlab_size());
}
bool ZHeap::is_in(uintptr_t addr) const {
if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
return false;
}
const ZPage* const page = _pagetable.get(addr);
if (page != NULL) {
return page->is_in(addr);
}
return false;
}
uintptr_t ZHeap::block_start(uintptr_t addr) const {
const ZPage* const page = _pagetable.get(addr);
return page->block_start(addr);
}
size_t ZHeap::block_size(uintptr_t addr) const {
const ZPage* const page = _pagetable.get(addr);
return page->block_size(addr);
}
bool ZHeap::block_is_obj(uintptr_t addr) const {
const ZPage* const page = _pagetable.get(addr);
return page->block_is_obj(addr);
}
uint ZHeap::nconcurrent_worker_threads() const {
return _workers.nconcurrent();
}
uint ZHeap::nconcurrent_no_boost_worker_threads() const {
return _workers.nconcurrent_no_boost();
}
void ZHeap::set_boost_worker_threads(bool boost) {
_workers.set_boost(boost);
}
void ZHeap::worker_threads_do(ThreadClosure* tc) const {
_workers.threads_do(tc);
}
void ZHeap::print_worker_threads_on(outputStream* st) const {
_workers.print_threads_on(st);
}
void ZHeap::out_of_memory() {
ResourceMark rm;
ZStatInc(ZCounterOutOfMemory);
log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}
ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
ZPage* const page = _page_allocator.alloc_page(type, size, flags);
if (page != NULL) {
// Update pagetable
_pagetable.insert(page);
}
return page;
}
void ZHeap::undo_alloc_page(ZPage* page) {
assert(page->is_allocating(), "Invalid page state");
ZStatInc(ZCounterUndoPageAllocation);
log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
ZThread::id(), ZThread::name(), p2i(page), page->size());
release_page(page, false /* reclaimed */);
}
bool ZHeap::retain_page(ZPage* page) {
return page->inc_refcount();
}
void ZHeap::release_page(ZPage* page, bool reclaimed) {
if (page->dec_refcount()) {
_page_allocator.free_page(page, reclaimed);
}
}
void ZHeap::flip_views() {
// For debugging only
if (ZUnmapBadViews) {
// Flip pages
ZPageTableIterator iter(&_pagetable);
for (ZPage* page; iter.next(&page);) {
if (!page->is_detached()) {
_page_allocator.flip_page(page);
}
}
// Flip pre-mapped memory
_page_allocator.flip_pre_mapped();
}
}
void ZHeap::mark_start() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
// Update statistics
ZStatSample(ZSamplerHeapUsedBeforeMark, used());
// Retire TLABs
_object_allocator.retire_tlabs();
// Flip address view
ZAddressMasks::flip_to_marked();
flip_views();
// Reset allocated/reclaimed/used statistics
_page_allocator.reset_statistics();
// Reset encountered/dropped/enqueued statistics
_reference_processor.reset_statistics();
// Enter mark phase
ZGlobalPhase = ZPhaseMark;
// Reset marking information and mark roots
_mark.start();
// Update statistics
ZStatHeap::set_at_mark_start(capacity(), used());
}
void ZHeap::mark() {
_mark.mark();
}
void ZHeap::mark_flush_and_free(Thread* thread) {
_mark.flush_and_free(thread);
}
class ZFixupPartialLoadsTask : public ZTask {
private:
ZThreadRootsIterator _thread_roots;
public:
ZFixupPartialLoadsTask() :
ZTask("ZFixupPartialLoadsTask"),
_thread_roots() {}
virtual void work() {
ZMarkRootOopClosure cl;
_thread_roots.oops_do(&cl);
}
};
void ZHeap::fixup_partial_loads() {
ZFixupPartialLoadsTask task;
_workers.run_parallel(&task);
}
bool ZHeap::mark_end() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
// C2 can generate code where a safepoint poll is inserted
// between a load and the associated load barrier. To handle
// this case we need to rescan the thread stack here to make
// sure such oops are marked.
fixup_partial_loads();
// Try end marking
if (!_mark.end()) {
// Marking not completed, continue concurrent mark
return false;
}
// Enter mark completed phase
ZGlobalPhase = ZPhaseMarkCompleted;
// Resize metaspace
MetaspaceGC::compute_new_size();
// Update statistics
ZStatSample(ZSamplerHeapUsedAfterMark, used());
ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
// Block resurrection of weak/phantom references
ZResurrection::block();
// Process weak roots
_weak_roots_processor.process_weak_roots();
// Verification
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
Universe::verify();
}
return true;
}
void ZHeap::set_soft_reference_policy(bool clear) {
_reference_processor.set_soft_reference_policy(clear);
}
void ZHeap::process_non_strong_references() {
// Process Soft/Weak/Final/PhantomReferences
_reference_processor.process_references();
// Process concurrent weak roots
_weak_roots_processor.process_concurrent_weak_roots();
// Unblock resurrection of weak/phantom references
ZResurrection::unblock();
// Enqueue Soft/Weak/Final/PhantomReferences. Note that this
// must be done after unblocking resurrection. Otherwise the
// Finalizer thread could call Reference.get() on the Finalizers
// that were just enqueued, which would incorrectly return null
// during the resurrection block window, since such referents
// are only Finalizable marked.
_reference_processor.enqueue_references();
}
void ZHeap::destroy_detached_pages() {
ZList<ZPage> list;
_page_allocator.flush_detached_pages(&list);
for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
// Remove pagetable entry
_pagetable.remove(page);
// Delete the page
_page_allocator.destroy_page(page);
}
}
void ZHeap::select_relocation_set() {
// Register relocatable pages with selector
ZRelocationSetSelector selector;
ZPageTableIterator iter(&_pagetable);
for (ZPage* page; iter.next(&page);) {
if (!page->is_relocatable()) {
// Not relocatable, don't register
continue;
}
if (page->is_marked()) {
// Register live page
selector.register_live_page(page);
} else {
// Register garbage page
selector.register_garbage_page(page);
// Reclaim page immediately
release_page(page, true /* reclaimed */);
}
}
// Select pages to relocate
selector.select(&_relocation_set);
// Update statistics
ZStatRelocation::set_at_select_relocation_set(selector.relocating());
ZStatHeap::set_at_select_relocation_set(selector.live(),
selector.garbage(),
reclaimed());
}
void ZHeap::prepare_relocation_set() {
ZRelocationSetIterator iter(&_relocation_set);
for (ZPage* page; iter.next(&page);) {
// Prepare for relocation
page->set_forwarding();
// Update pagetable
_pagetable.set_relocating(page);
}
}
void ZHeap::reset_relocation_set() {
ZRelocationSetIterator iter(&_relocation_set);
for (ZPage* page; iter.next(&page);) {
// Reset relocation information
page->reset_forwarding();
// Update pagetable
_pagetable.clear_relocating(page);
}
}
void ZHeap::relocate_start() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
// Update statistics
ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
// Flip address view
ZAddressMasks::flip_to_remapped();
flip_views();
// Remap TLABs
_object_allocator.remap_tlabs();
// Enter relocate phase
ZGlobalPhase = ZPhaseRelocate;
// Update statistics
ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
// Remap/Relocate roots
_relocate.start();
}
uintptr_t ZHeap::relocate_object(uintptr_t addr) {
assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
ZPage* const page = _pagetable.get(addr);
const bool retained = retain_page(page);
const uintptr_t new_addr = page->relocate_object(addr);
if (retained) {
release_page(page, true /* reclaimed */);
}
return new_addr;
}
uintptr_t ZHeap::forward_object(uintptr_t addr) {
assert(ZGlobalPhase == ZPhaseMark ||
ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
ZPage* const page = _pagetable.get(addr);
return page->forward_object(addr);
}
void ZHeap::relocate() {
// Relocate relocation set
const bool success = _relocate.relocate(&_relocation_set);
// Update statistics
ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
ZStatRelocation::set_at_relocate_end(success);
ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
used(), used_high(), used_low());
}
void ZHeap::object_iterate(ObjectClosure* cl) {
// Should only be called in a safepoint after mark end.
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZHeapIterator iter;
iter.objects_do(cl);
}
void ZHeap::serviceability_initialize() {
_serviceability.initialize();
}
GCMemoryManager* ZHeap::serviceability_memory_manager() {
return _serviceability.memory_manager();
}
MemoryPool* ZHeap::serviceability_memory_pool() {
return _serviceability.memory_pool();
}
ZServiceabilityCounters* ZHeap::serviceability_counters() {
return _serviceability.counters();
}
void ZHeap::print_on(outputStream* st) const {
st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
used() / M,
capacity() / M,
max_capacity() / M);
MetaspaceUtils::print_on(st);
}
void ZHeap::print_extended_on(outputStream* st) const {
print_on(st);
st->cr();
ZPageTableIterator iter(&_pagetable);
for (ZPage* page; iter.next(&page);) {
page->print_on(st);
}
st->cr();
}
class ZVerifyRootsTask : public ZTask {
private:
ZRootsIterator _strong_roots;
ZWeakRootsIterator _weak_roots;
public:
ZVerifyRootsTask() :
ZTask("ZVerifyRootsTask"),
_strong_roots(),
_weak_roots() {}
virtual void work() {
ZVerifyRootOopClosure cl;
_strong_roots.oops_do(&cl);
_weak_roots.oops_do(&cl);
}
};
void ZHeap::verify() {
// Heap verification can only be done between mark end and
// relocate start. This is the only window where all oops are
// good and the whole heap is in a consistent state.
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
{
ZVerifyRootsTask task;
_workers.run_parallel(&task);
}
{
ZVerifyObjectClosure cl;
object_iterate(&cl);
}
}

@ -0,0 +1,171 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZHEAP_HPP
#define SHARE_GC_Z_ZHEAP_HPP
#include "gc/shared/gcTimer.hpp"
#include "gc/z/zAllocationFlags.hpp"
#include "gc/z/zArray.hpp"
#include "gc/z/zList.hpp"
#include "gc/z/zLock.hpp"
#include "gc/z/zMark.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageTable.hpp"
#include "gc/z/zReferenceProcessor.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.hpp"
#include "gc/z/zRelocationSetSelector.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zWeakRootsProcessor.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zWorkers.hpp"
#include "memory/allocation.hpp"
class ZHeap {
friend class VMStructs;
private:
static ZHeap* _heap;
ZWorkers _workers;
ZObjectAllocator _object_allocator;
ZPageAllocator _page_allocator;
ZPageTable _pagetable;
ZMark _mark;
ZReferenceProcessor _reference_processor;
ZWeakRootsProcessor _weak_roots_processor;
ZRelocate _relocate;
ZRelocationSet _relocation_set;
ZServiceability _serviceability;
size_t heap_min_size() const;
size_t heap_max_size() const;
size_t heap_max_reserve_size() const;
void out_of_memory();
void flip_views();
void fixup_partial_loads();
public:
static ZHeap* heap();
ZHeap();
bool is_initialized() const;
// Heap metrics
size_t min_capacity() const;
size_t max_capacity() const;
size_t capacity() const;
size_t max_reserve() const;
size_t used_high() const;
size_t used_low() const;
size_t used() const;
size_t allocated() const;
size_t reclaimed() const;
size_t tlab_capacity() const;
size_t tlab_used() const;
size_t max_tlab_size() const;
size_t unsafe_max_tlab_alloc() const;
bool is_in(uintptr_t addr) const;
// Block
uintptr_t block_start(uintptr_t addr) const;
size_t block_size(uintptr_t addr) const;
bool block_is_obj(uintptr_t addr) const;
// Workers
uint nconcurrent_worker_threads() const;
uint nconcurrent_no_boost_worker_threads() const;
void set_boost_worker_threads(bool boost);
void worker_threads_do(ThreadClosure* tc) const;
void print_worker_threads_on(outputStream* st) const;
// Reference processing
ReferenceDiscoverer* reference_discoverer();
void set_soft_reference_policy(bool clear);
// Non-strong reference processing
void process_non_strong_references();
// Page allocation
ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
void undo_alloc_page(ZPage* page);
bool retain_page(ZPage* page);
void release_page(ZPage* page, bool reclaimed);
// Object allocation
uintptr_t alloc_tlab(size_t size);
uintptr_t alloc_object(size_t size);
uintptr_t alloc_object_for_relocation(size_t size);
void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
void check_out_of_memory();
// Marking
bool is_object_live(uintptr_t addr) const;
bool is_object_strongly_live(uintptr_t addr) const;
template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
void mark_start();
void mark();
void mark_flush_and_free(Thread* thread);
bool mark_end();
// Post-marking & Pre-relocation
void destroy_detached_pages();
// Relocation set
void select_relocation_set();
void prepare_relocation_set();
void reset_relocation_set();
// Relocation
bool is_relocating(uintptr_t addr) const;
void relocate_start();
uintptr_t relocate_object(uintptr_t addr);
uintptr_t forward_object(uintptr_t addr);
void relocate();
// Iteration
void object_iterate(ObjectClosure* cl);
// Serviceability
void serviceability_initialize();
GCMemoryManager* serviceability_memory_manager();
MemoryPool* serviceability_memory_pool();
ZServiceabilityCounters* serviceability_counters();
// Printing
void print_on(outputStream* st) const;
void print_extended_on(outputStream* st) const;
// Verification
bool is_oop(oop object) const;
void verify();
};
#endif // SHARE_GC_Z_ZHEAP_HPP
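Taken together, the entry points above trace one full ZGC collection cycle. The sketch below is an editor's illustration only: the sequencing is inferred from the API (the driver that actually issues these calls is elsewhere in this commit and not shown here), and the pause/concurrent annotations are assumptions.

// Hypothetical driver sketch; ordering inferred from the ZHeap API above.
static void example_collection_cycle() {
  ZHeap* const heap = ZHeap::heap();
  heap->mark_start();                        // Pause: begin marking
  heap->mark();                              // Concurrent: trace live objects
  if (heap->mark_end()) {                    // Pause: marking terminated
    heap->process_non_strong_references();   // Concurrent: weak/soft/final refs
    heap->destroy_detached_pages();          // Post-marking cleanup
    heap->select_relocation_set();
    heap->prepare_relocation_set();
    heap->relocate_start();                  // Pause: begin relocation
    heap->relocate();                        // Concurrent: move live objects
  }
}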
@ -0,0 +1,100 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZHEAP_INLINE_HPP
#define SHARE_GC_Z_ZHEAP_INLINE_HPP
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "utilities/debug.hpp"
inline ZHeap* ZHeap::heap() {
assert(_heap != NULL, "Not initialized");
return _heap;
}
inline ReferenceDiscoverer* ZHeap::reference_discoverer() {
return &_reference_processor;
}
inline bool ZHeap::is_relocating(uintptr_t addr) const {
return _pagetable.is_relocating(addr);
}
inline bool ZHeap::is_object_live(uintptr_t addr) const {
ZPage* page = _pagetable.get(addr);
return page->is_object_live(addr);
}
inline bool ZHeap::is_object_strongly_live(uintptr_t addr) const {
ZPage* page = _pagetable.get(addr);
return page->is_object_strongly_live(addr);
}
template <bool finalizable, bool publish>
inline void ZHeap::mark_object(uintptr_t addr) {
assert(ZGlobalPhase == ZPhaseMark, "Mark not allowed");
_mark.mark_object<finalizable, publish>(addr);
}
inline uintptr_t ZHeap::alloc_tlab(size_t size) {
guarantee(size <= max_tlab_size(), "TLAB too large");
return _object_allocator.alloc_object(size);
}
inline uintptr_t ZHeap::alloc_object(size_t size) {
uintptr_t addr = _object_allocator.alloc_object(size);
assert(ZAddress::is_good_or_null(addr), "Bad address");
if (addr == 0) {
out_of_memory();
}
return addr;
}
inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {
uintptr_t addr = _object_allocator.alloc_object_for_relocation(size);
assert(ZAddress::is_good_or_null(addr), "Bad address");
return addr;
}
inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
ZPage* const page = _pagetable.get(addr);
_object_allocator.undo_alloc_object_for_relocation(page, addr, size);
}
inline void ZHeap::check_out_of_memory() {
_page_allocator.check_out_of_memory();
}
inline bool ZHeap::is_oop(oop object) const {
return ZOop::is_good(object);
}
#endif // SHARE_GC_Z_ZHEAP_INLINE_HPP
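Note the different failure contracts of the two allocation paths above: alloc_tlab() guarantees up front that the request fits the TLAB cap, while alloc_object() returns 0 on failure after recording the out-of-memory condition. A minimal hypothetical caller:

// Hypothetical caller; example_alloc is not part of the commit.
static uintptr_t example_alloc(size_t size) {
  const uintptr_t addr = ZHeap::heap()->alloc_object(size);
  if (addr == 0) {
    // Allocation failed; out_of_memory() has already run inside
    // alloc_object(), so the caller only needs to propagate failure.
  }
  return addr;
}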
@ -0,0 +1,183 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddressRangeMap.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
CHeapBitMap _map;
public:
ZHeapIteratorBitMap(size_t size_in_bits) :
_map(size_in_bits) {}
bool try_set_bit(size_t index) {
if (_map.at(index)) {
return false;
}
_map.set_bit(index);
return true;
}
};
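// Editor's sketch (hypothetical, not part of the commit): the deduplication
// contract of try_set_bit(). The first call for a given index returns true,
// every later call returns false, so each object is pushed to the visit
// stack at most once. The non-atomic test-then-set is sufficient under the
// assumption that heap iteration runs single-threaded, which is consistent
// with the stack-based drain loop further down in this file.
static void example_try_set_bit_contract() {
  ZHeapIteratorBitMap map(1024);
  assert(map.try_set_bit(42), "first visit marks the bit");
  assert(!map.try_set_bit(42), "revisit is filtered out");
}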
class ZHeapIteratorRootOopClosure : public OopClosure {
private:
ZHeapIterator* const _iter;
ObjectClosure* const _cl;
public:
ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
_iter(iter),
_cl(cl) {}
virtual void do_oop(oop* p) {
// Load barrier needed here for the same reason we
// need fixup_partial_loads() in ZHeap::mark_end()
const oop obj = RootAccess<>::oop_load(p);
_iter->push(obj);
_iter->drain(_cl);
}
virtual void do_oop(narrowOop* p) {
ShouldNotReachHere();
}
};
class ZHeapIteratorPushOopClosure : public ExtendedOopClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
public:
ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
_iter(iter),
_base(base) {}
void do_oop_nv(oop* p) {
const oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
_iter->push(obj);
}
void do_oop_nv(narrowOop* p) {
ShouldNotReachHere();
}
virtual void do_oop(oop* p) {
do_oop_nv(p);
}
virtual void do_oop(narrowOop* p) {
do_oop_nv(p);
}
#ifdef ASSERT
virtual bool should_verify_oops() {
return false;
}
#endif
};
ZHeapIterator::ZHeapIterator() :
_visit_stack(),
_visit_map() {}
ZHeapIterator::~ZHeapIterator() {
ZVisitMapIterator iter(&_visit_map);
for (ZHeapIteratorBitMap* map; iter.next(&map);) {
delete map;
}
}
size_t ZHeapIterator::object_index_max() const {
return ZPageSizeMin >> ZObjectAlignmentSmallShift;
}
size_t ZHeapIterator::object_index(oop obj) const {
const uintptr_t addr = ZOop::to_address(obj);
const uintptr_t offset = ZAddress::offset(addr);
const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
return (offset & mask) >> ZObjectAlignmentSmallShift;
}
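// Editor's worked example (values are assumptions, not read from this diff):
// with ZPageSizeMinShift = 21 (2M minimum page size) and
// ZObjectAlignmentSmallShift = 3 (8-byte object alignment), an object at
// heap offset 0x200010 lies in the second 2M granule:
//   mask  = (1 << 21) - 1              = 0x1FFFFF
//   index = (0x200010 & 0x1FFFFF) >> 3 = 0x10 >> 3 = 2
// and object_index_max() = 2M >> 3 = 262144 bits per granule bitmap, i.e.
// one bit per smallest possible object slot.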
ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
const uintptr_t addr = ZOop::to_address(obj);
ZHeapIteratorBitMap* map = _visit_map.get(addr);
if (map == NULL) {
map = new ZHeapIteratorBitMap(object_index_max());
_visit_map.put(addr, map);
}
return map;
}
void ZHeapIterator::push(oop obj) {
if (obj == NULL) {
// Ignore
return;
}
ZHeapIteratorBitMap* const map = object_map(obj);
const size_t index = object_index(obj);
if (!map->try_set_bit(index)) {
// Already pushed
return;
}
// Push
_visit_stack.push(obj);
}
void ZHeapIterator::drain(ObjectClosure* cl) {
while (!_visit_stack.is_empty()) {
const oop obj = _visit_stack.pop();
// Visit
cl->do_object(obj);
// Push members to visit
ZHeapIteratorPushOopClosure push_cl(this, obj);
obj->oop_iterate(&push_cl);
}
}
void ZHeapIterator::objects_do(ObjectClosure* cl) {
ZHeapIteratorRootOopClosure root_cl(this, cl);
ZRootsIterator roots;
// Follow roots. Note that we also visit the JVMTI weak tag map
// as if they were strong roots, to make sure we visit all tagged
// objects, even those that might now have become unreachable.
// If we didn't do this, a tagged object could be missing from the
// iteration even though the user has not yet received an ObjectFree
// event for it.
roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
}
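A hedged usage sketch: ZHeapIterator is a StackObj driven by a standard HotSpot ObjectClosure. The counting closure below is hypothetical, and running at a safepoint is assumed, since the iterator takes no locks of its own.

// Hypothetical closure; ObjectClosure::do_object() is the standard hook.
class ExampleCountObjectClosure : public ObjectClosure {
private:
  size_t _count;
public:
  ExampleCountObjectClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};

static size_t example_count_objects() {
  ExampleCountObjectClosure cl;
  ZHeapIterator iter;
  iter.objects_do(&cl);
  return cl.count();
}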
@ -0,0 +1,60 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZHEAPITERATOR_HPP
#define SHARE_GC_Z_ZHEAPITERATOR_HPP
#include "gc/z/zAddressRangeMap.hpp"
#include "gc/z/zGlobals.hpp"
#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
class ZHeapIteratorBitMap;
class ZHeapIterator : public StackObj {
friend class ZHeapIteratorRootOopClosure;
friend class ZHeapIteratorPushOopClosure;
private:
typedef ZAddressRangeMap<ZHeapIteratorBitMap*, ZPageSizeMinShift> ZVisitMap;
typedef ZAddressRangeMapIterator<ZHeapIteratorBitMap*, ZPageSizeMinShift> ZVisitMapIterator;
typedef Stack<oop, mtGC> ZVisitStack;
ZVisitStack _visit_stack;
ZVisitMap _visit_map;
size_t object_index_max() const;
size_t object_index(oop obj) const;
ZHeapIteratorBitMap* object_map(oop obj);
void push(oop obj);
void drain(ObjectClosure* cl);
public:
ZHeapIterator();
~ZHeapIterator();
void objects_do(ObjectClosure* cl);
};
#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP
@ -0,0 +1,51 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zCPU.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zLargePages.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.hpp"
#include "logging/log.hpp"
#include "runtime/vm_version.hpp"
ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
log_info(gc, init)("Initializing %s", ZGCName);
log_info(gc, init)("Version: %s (%s)",
Abstract_VM_Version::vm_release(),
Abstract_VM_Version::jdk_debug_level());
// Early initialization
ZAddressMasks::initialize();
ZNUMA::initialize();
ZCPU::initialize();
ZStatValue::initialize();
ZTracer::initialize();
ZLargePages::initialize();
ZBarrierSet::set_barrier_set(barrier_set);
}
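ZInitialize is a constructor-only type: building one runs this early-initialization sequence exactly once and registers the barrier set globally. A hedged sketch of the embedding (the member layout below is an assumption for illustration; the actual call site is elsewhere in this commit):

// Hypothetical embedding: member order guarantees the barrier set is
// constructed before ZInitialize registers it.
class ZExampleCollectedHeap {
private:
  ZBarrierSet _barrier_set;
  ZInitialize _initialize;
public:
  ZExampleCollectedHeap() :
      _barrier_set(),
      _initialize(&_barrier_set) {}
};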
@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZINITIALIZE_HPP
#define SHARE_GC_Z_ZINITIALIZE_HPP
#include "memory/allocation.hpp"
class ZBarrierSet;
class ZInitialize {
public:
ZInitialize(ZBarrierSet* barrier_set);
};
#endif // SHARE_GC_Z_ZINITIALIZE_HPP
@ -0,0 +1,49 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zLargePages.hpp"
#include "logging/log.hpp"
#include "runtime/os.hpp"
ZLargePages::State ZLargePages::_state;
void ZLargePages::initialize() {
initialize_platform();
log_info(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M);
log_info(gc, init)("Large Page Support: %s", to_string());
}
const char* ZLargePages::to_string() {
switch (_state) {
case Explicit:
return "Enabled (Explicit)";
case Transparent:
return "Enabled (Transparent)";
default:
return "Disabled";
}
}
@ -0,0 +1,51 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZLARGEPAGES_HPP
#define SHARE_GC_Z_ZLARGEPAGES_HPP
#include "memory/allocation.hpp"
class ZLargePages : public AllStatic {
private:
enum State {
Disabled,
Explicit,
Transparent
};
static State _state;
static void initialize_platform();
public:
static void initialize();
static bool is_enabled();
static bool is_explicit();
static bool is_transparent();
static const char* to_string();
};
#endif // SHARE_GC_Z_ZLARGEPAGES_HPP
@ -0,0 +1,41 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
#define SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
#include "gc/z/zLargePages.hpp"
inline bool ZLargePages::is_enabled() {
return _state != Disabled;
}
inline bool ZLargePages::is_explicit() {
return _state == Explicit;
}
inline bool ZLargePages::is_transparent() {
return _state == Transparent;
}
#endif // SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
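The three predicates let platform code branch on the large-page state without re-parsing flags. A hedged sketch; the mmap/madvise choices named in the comments are illustrative assumptions, not this commit's actual Linux implementation:

static void example_commit_memory(uintptr_t addr, size_t size) {
  if (ZLargePages::is_explicit()) {
    // e.g. map the range with MAP_HUGETLB
  } else if (ZLargePages::is_transparent()) {
    // e.g. madvise(addr, size, MADV_HUGEPAGE)
  } else {
    // Plain small pages
  }
}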
@ -0,0 +1,240 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZLIST_HPP
#define SHARE_GC_Z_ZLIST_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
template <typename T> class ZList;
// Element in a doubly linked list
template <typename T>
class ZListNode {
friend class ZList<T>;
private:
ZListNode* _next;
ZListNode* _prev;
ZListNode(ZListNode* next, ZListNode* prev) :
_next(next),
_prev(prev) {}
void set_unused() {
_next = NULL;
_prev = NULL;
}
public:
ZListNode() {
set_unused();
}
~ZListNode() {
set_unused();
}
bool is_unused() const {
return _next == NULL && _prev == NULL;
}
};
// Doubly linked list
template <typename T>
class ZList {
private:
ZListNode<T> _head;
size_t _size;
// Passing by value and assignment are not allowed
ZList(const ZList<T>& list);
ZList<T>& operator=(const ZList<T>& list);
void verify() const {
assert(_head._next->_prev == &_head, "List corrupt");
assert(_head._prev->_next == &_head, "List corrupt");
}
void insert(ZListNode<T>* before, ZListNode<T>* node) {
verify();
assert(node->is_unused(), "Already in a list");
node->_prev = before;
node->_next = before->_next;
before->_next = node;
node->_next->_prev = node;
_size++;
}
ZListNode<T>* cast_to_inner(T* elem) const {
return &elem->_node;
}
T* cast_to_outer(ZListNode<T>* node) const {
return (T*)((uintptr_t)node - offset_of(T, _node));
}
public:
ZList() :
_head(&_head, &_head),
_size(0) {
verify();
}
size_t size() const {
verify();
return _size;
}
bool is_empty() const {
return _size == 0;
}
T* first() const {
return is_empty() ? NULL : cast_to_outer(_head._next);
}
T* last() const {
return is_empty() ? NULL : cast_to_outer(_head._prev);
}
T* next(T* elem) const {
verify();
ZListNode<T>* next = cast_to_inner(elem)->_next;
return (next == &_head) ? NULL : cast_to_outer(next);
}
T* prev(T* elem) const {
verify();
ZListNode<T>* prev = cast_to_inner(elem)->_prev;
return (prev == &_head) ? NULL : cast_to_outer(prev);
}
void insert_first(T* elem) {
insert(&_head, cast_to_inner(elem));
}
void insert_last(T* elem) {
insert(_head._prev, cast_to_inner(elem));
}
void insert_before(T* before, T* elem) {
insert(cast_to_inner(before)->_prev, cast_to_inner(elem));
}
void insert_after(T* after, T* elem) {
insert(cast_to_inner(after), cast_to_inner(elem));
}
void remove(T* elem) {
verify();
ZListNode<T>* const node = cast_to_inner(elem);
assert(!node->is_unused(), "Not in a list");
ZListNode<T>* const next = node->_next;
ZListNode<T>* const prev = node->_prev;
assert(next->_prev == node, "List corrupt");
assert(prev->_next == node, "List corrupt");
prev->_next = next;
next->_prev = prev;
node->set_unused();
_size--;
}
T* remove_first() {
T* elem = first();
if (elem != NULL) {
remove(elem);
}
return elem;
}
T* remove_last() {
T* elem = last();
if (elem != NULL) {
remove(elem);
}
return elem;
}
void transfer(ZList<T>* list) {
verify();
if (!list->is_empty()) {
// Splice the incoming list onto the tail. Ordering matters here: on the
// second assignment below, _head._prev->_next still equals &_head, which
// is exactly what must terminate the spliced-in chain.
list->_head._next->_prev = _head._prev;
list->_head._prev->_next = _head._prev->_next;
_head._prev->_next = list->_head._next;
_head._prev = list->_head._prev;
// Reset the donor list to empty
list->_head._next = &list->_head;
list->_head._prev = &list->_head;
// Update sizes
_size += list->_size;
list->_size = 0;
list->verify();
verify();
}
}
};
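// Editor's sketch (hypothetical, not part of the commit): ZList is an
// intrusive list, so cast_to_inner()/cast_to_outer() above require the
// element type to embed a ZListNode<T> member literally named _node and to
// befriend ZList<T>.
class ZExampleItem {
  friend class ZList<ZExampleItem>;
private:
  ZListNode<ZExampleItem> _node;
public:
  int _value;
};

inline void example_list_use() {
  ZList<ZExampleItem> list;
  ZExampleItem a;
  ZExampleItem b;
  list.insert_last(&a);
  list.insert_last(&b);
  // Manual walk using first()/next(); NULL terminates the walk.
  for (ZExampleItem* e = list.first(); e != NULL; e = list.next(e)) {
    e->_value = 0;
  }
  list.remove(&a);  // Resets a's node to the unused state
}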
template <typename T, bool forward>
class ZListIteratorImpl : public StackObj {
private:
ZList<T>* const _list;
T* _next;
public:
ZListIteratorImpl(ZList<T>* list);
bool next(T** elem);
};
// Iterator types
#define ZLIST_FORWARD true
#define ZLIST_REVERSE false
template <typename T>
class ZListIterator : public ZListIteratorImpl<T, ZLIST_FORWARD> {
public:
ZListIterator(ZList<T>* list) :
ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
};
template <typename T>
class ZListReverseIterator : public ZListIteratorImpl<T, ZLIST_REVERSE> {
public:
ZListReverseIterator(ZList<T>* list) :
ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
};
#endif // SHARE_GC_Z_ZLIST_HPP
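The iterator wrappers mirror the usage already visible in ZHeapIterator::~ZHeapIterator(): construct over a list, then pull elements until next() returns false (next() itself is defined in zList.inline.hpp, which this excerpt does not show). Reusing the hypothetical ZExampleItem from the sketch above:

static void example_iterate(ZList<ZExampleItem>* list) {
  ZListIterator<ZExampleItem> iter(list);
  for (ZExampleItem* e; iter.next(&e);) {
    // Visit e in insertion order
  }
}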