8221766: Load-reference barriers for Shenandoah
Reviewed-by: kvn, erikj, aph, shade
parent 60e51498f2
commit 03ab1404f0
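The diff below replaces Shenandoah's separate Brooks-pointer read/write barriers with a single load-reference barrier (LRB) applied to values just loaded from the heap. As a rough orientation before the platform-specific assembly, here is a minimal C++ sketch of the idea. It is not HotSpot code: ToyOop, gc_unstable, in_collection_set and evacuate are hypothetical stand-ins; the real fast path is the gc_state test and the shenandoah_lrb stub shown in the hunks that follow.

// Conceptual sketch only, assuming a Brooks-style forwarding word per object.
struct ToyOop {
  ToyOop* forwardee;   // stand-in for the Brooks forwarding pointer word
};

static bool gc_unstable = false;            // any of HAS_FORWARDED / EVACUATION / TRAVERSAL
static bool evacuation_in_progress = false;

static bool in_collection_set(ToyOop*) { return evacuation_in_progress; }
static ToyOop* evacuate(ToyOop* obj)   { return obj->forwardee; } // slow-path stand-in

// Applied to a freshly loaded reference, so the rest of the VM only sees to-space oops.
static ToyOop* load_reference_barrier(ToyOop* obj) {
  if (obj == nullptr || !gc_unstable) {
    return obj;                             // fast path: heap is stable, nothing to do
  }
  ToyOop* fwd = obj->forwardee;             // resolve a possible from-space pointer
  if (fwd == obj && in_collection_set(obj)) {
    fwd = evacuate(obj);                    // not copied yet: evacuate on the spot
  }
  return fwd;
}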
@ -172,8 +172,6 @@ endif
ifneq ($(call check-jvm-feature, shenandoahgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
JVM_EXCLUDE_PATTERNS += gc/shenandoah
else
JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT
endif

ifneq ($(call check-jvm-feature, jfr), true)
@ -40,7 +40,7 @@

#define __ masm->

address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, RegSet saved_regs) {
@ -198,60 +198,31 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}

void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
if (ShenandoahReadBarrier) {
read_barrier_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
Label is_null;
__ cbz(dst, is_null);
read_barrier_not_null_impl(masm, dst);
resolve_forward_pointer_not_null(masm, dst);
__ bind(is_null);
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
if (ShenandoahReadBarrier) {
read_barrier_not_null_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}

void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
if (ShenandoahWriteBarrier) {
write_barrier_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
assert(dst != rscratch1, "need rscratch1");
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp) {
assert(ShenandoahLoadRefBarrier, "Should be enabled");
assert(dst != rscratch2, "need rscratch2");

Label done;

__ enter();
Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
__ ldrb(rscratch1, gc_state);
__ ldrb(rscratch2, gc_state);

// Check for heap stability
__ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ tst(rscratch1, rscratch2);
__ br(Assembler::EQ, done);

// Heap is unstable, need to perform the read-barrier even if WB is inactive
__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));

// Check for evacuation-in-progress and jump to WB slow-path if needed
__ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ tst(rscratch1, rscratch2);
__ br(Assembler::EQ, done);
__ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);

RegSet to_save = RegSet::of(r0);
if (dst != r0) {
@ -259,7 +230,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ mov(r0, dst);
}

__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));

if (dst != r0) {
__ mov(dst, r0);
@ -267,14 +238,11 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
}

__ bind(done);
__ leave();
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahStoreValEnqueueBarrier) {
Label is_null;
__ cbz(dst, is_null);
write_barrier_impl(masm, dst);
__ bind(is_null);
// Save possibly live regs.
RegSet live_regs = RegSet::range(r0, r4) - dst;
__ push(live_regs, sp);
@ -286,44 +254,45 @@ void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Regis
__ ldrd(v0, __ post(sp, 2 * wordSize));
__ pop(live_regs, sp);
}
if (ShenandoahStoreValReadBarrier) {
read_barrier_impl(masm, dst);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahLoadRefBarrier) {
Label is_null;
__ cbz(dst, is_null);
load_reference_barrier_not_null(masm, dst, tmp);
__ bind(is_null);
}
}

void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;

if (in_heap) {
read_barrier_not_null(masm, src.base());
}

BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
__ enter();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
__ leave();
if (on_oop) {
load_reference_barrier(masm, dst, tmp1);

if (ShenandoahKeepAliveBarrier && on_reference) {
__ enter();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
__ leave();
}
}
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
bool in_heap = (decorators & IN_HEAP) != 0;
if (in_heap) {
write_barrier(masm, dst.base());
}
if (!on_oop) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
return;
@ -361,21 +330,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet

}

void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
__ cmp(op1, op2);
if (ShenandoahAcmpBarrier) {
Label done;
__ br(Assembler::EQ, done);
// The object may have been evacuated, but we won't see it without a
// membar here.
__ membar(Assembler::LoadStore| Assembler::LoadLoad);
read_barrier(masm, op1);
read_barrier(masm, op2);
__ cmp(op1, op2);
__ bind(done);
}
}

void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
Register var_size_in_bytes,
int con_size_in_bytes,
@ -410,27 +364,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register
}
}

void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
bool is_write = (decorators & ACCESS_WRITE) != 0;
if (is_write) {
if (oop_not_null) {
write_barrier(masm, obj);
} else {
Label done;
__ cbz(obj, done);
write_barrier(masm, obj);
__ bind(done);
}
} else {
if (oop_not_null) {
read_barrier_not_null(masm, obj);
} else {
read_barrier(masm, obj);
}
}
}

void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool weak, bool is_cae,
Register result) {
@ -469,8 +402,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register a
__ decode_heap_oop(tmp1, tmp1);
__ decode_heap_oop(tmp2, tmp2);
}
read_barrier_impl(masm, tmp1);
read_barrier_impl(masm, tmp2);
resolve_forward_pointer(masm, tmp1);
resolve_forward_pointer(masm, tmp2);
__ cmp(tmp1, tmp2);
// Retry with expected now being the value we just loaded from addr.
__ br(Assembler::EQ, retry);
@ -515,7 +448,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen
__ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {

Register obj = stub->obj()->as_register();
Register res = stub->result()->as_register();
@ -532,7 +465,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
__ cbz(res, done);
}

write_barrier(ce->masm(), res);
load_reference_barrier_not_null(ce->masm(), res, rscratch1);

__ bind(done);
__ b(*stub->continuation());
@ -592,14 +525,14 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_wb() {
assert(_shenandoah_wb != NULL, "need write barrier stub");
return _shenandoah_wb;
address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
return _shenandoah_lrb;
}

#define __ cgen->assembler()->

// Shenandoah write barrier.
// Shenandoah load reference barrier.
//
// Input:
// r0: OOP to evacuate. Not null.
@ -608,13 +541,13 @@ address ShenandoahBarrierSetAssembler::shenandoah_wb() {
// r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2. Preserve everything else.
address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {

__ align(6);
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();

Label work;
Label work, done;
__ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
__ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
__ ldrb(rscratch2, Address(rscratch2, rscratch1));
@ -622,19 +555,23 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
__ ret(lr);
__ bind(work);

Register obj = r0;
__ mov(rscratch2, r0);
resolve_forward_pointer_not_null(cgen->assembler(), r0);
__ cmp(rscratch2, r0);
__ br(Assembler::NE, done);

__ enter(); // required for proper stackwalking of RuntimeStub frame

__ push_call_clobbered_registers();

__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT));
__ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
__ mov(rscratch1, obj);
__ mov(rscratch1, r0);
__ pop_call_clobbered_registers();
__ mov(obj, rscratch1);
__ mov(r0, rscratch1);

__ leave(); // required for proper stackwalking of RuntimeStub frame
__ bind(done);
__ ret(lr);

return start;
@ -643,12 +580,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
if (ShenandoahLoadRefBarrier) {
int stub_code_size = 2048;
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
_shenandoah_wb = generate_shenandoah_wb(&cgen);
_shenandoah_lrb = generate_shenandoah_lrb(&cgen);
}
}
@ -29,7 +29,7 @@
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahWriteBarrierStub;
class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif
@ -37,7 +37,7 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

static address _shenandoah_wb;
static address _shenandoah_lrb;

void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
@ -54,24 +54,21 @@ private:
bool tosca_live,
bool expand_call);

void read_barrier(MacroAssembler* masm, Register dst);
void read_barrier_impl(MacroAssembler* masm, Register dst);
void read_barrier_not_null(MacroAssembler* masm, Register dst);
void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
void write_barrier(MacroAssembler* masm, Register dst);
void write_barrier_impl(MacroAssembler* masm, Register dst);
void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2);
void resolve_forward_pointer(MacroAssembler* masm, Register dst);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);
void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);

address generate_shenandoah_wb(StubCodeGenerator* cgen);
address generate_shenandoah_lrb(StubCodeGenerator* cgen);

public:
static address shenandoah_wb();
static address shenandoah_lrb();

void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);

#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif

@ -83,8 +80,6 @@ public:
Register dst, Address src, Register tmp1, Register tmp_thread);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
virtual void tlab_allocate(MacroAssembler* masm, Register obj,
Register var_size_in_bytes,
int con_size_in_bytes,
@ -99,6 +99,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
__ xchg(access.resolved_addr(), value_opr, result, tmp);

if (access.is_oop()) {
result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
if (ShenandoahSATBBarrier) {
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
result /* pre_val */);
@ -45,18 +45,6 @@ encode %{
%}
%}

instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
match(Set dst (ShenandoahReadBarrier src));
format %{ "shenandoah_rb $dst,$src" %}
ins_encode %{
Register s = $src$$Register;
Register d = $dst$$Register;
__ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(pipe_class_memory);
%}

instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
@ -41,7 +41,7 @@

#define __ masm->

address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
@ -293,41 +293,23 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}

void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
if (ShenandoahReadBarrier) {
read_barrier_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
assert(ShenandoahCASBarrier, "should be enabled");
Label is_null;
__ testptr(dst, dst);
__ jcc(Assembler::zero, is_null);
read_barrier_not_null_impl(masm, dst);
resolve_forward_pointer_not_null(masm, dst);
__ bind(is_null);
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
if (ShenandoahReadBarrier) {
read_barrier_not_null_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
__ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}


void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
if (ShenandoahWriteBarrier) {
write_barrier_impl(masm, dst);
}
}

void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
assert(ShenandoahLoadRefBarrier, "Should be enabled");
#ifdef _LP64
Label done;

@ -335,8 +317,8 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ jccb(Assembler::zero, done);

// Heap is unstable, need to perform the read-barrier even if WB is inactive
read_barrier_not_null(masm, dst);
// Heap is unstable, need to perform the resolve even if LRB is inactive
resolve_forward_pointer_not_null(masm, dst);

__ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ jccb(Assembler::zero, done);
@ -345,7 +327,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
}

__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));

if (dst != rax) {
__ xchgptr(rax, dst); // Swap back obj with rax.
@ -358,24 +340,18 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) {
if (ShenandoahStoreValEnqueueBarrier) {
storeval_barrier_impl(masm, dst, tmp);
}
}

void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");

if (dst == noreg) return;

#ifdef _LP64
if (ShenandoahStoreValEnqueueBarrier) {
Label is_null;
__ testptr(dst, dst);
__ jcc(Assembler::zero, is_null);
write_barrier_impl(masm, dst);
__ bind(is_null);

// The set of registers to be saved+restored is the same as in the write-barrier above.
// Those are the commonly used registers in the interpreter.
__ pusha();
@ -389,50 +365,54 @@ void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm,
//__ pop_callee_saved_registers();
__ popa();
}
if (ShenandoahStoreValReadBarrier) {
read_barrier_impl(masm, dst);
}
#else
Unimplemented();
#endif
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) {
if (ShenandoahLoadRefBarrier) {
Label done;
__ testptr(dst, dst);
__ jcc(Assembler::zero, done);
load_reference_barrier_not_null(masm, dst);
__ bind(done);
}
}

void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
if (in_heap) {
read_barrier_not_null(masm, src.base());
}
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (on_oop) {
load_reference_barrier(masm, dst);

// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
thread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
if (ShenandoahKeepAliveBarrier && on_reference) {
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
thread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
}
}
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {

bool on_oop = type == T_OBJECT || type == T_ARRAY;
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
if (in_heap) {
write_barrier(masm, dst.base());
}
if (type == T_OBJECT || type == T_ARRAY) {
if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;

Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
@ -475,44 +455,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
}
}

#ifndef _LP64
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
Address obj1, jobject obj2) {
Unimplemented();
}

void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
Register obj1, jobject obj2) {
Unimplemented();
}
#endif


void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
__ cmpptr(op1, op2);
if (ShenandoahAcmpBarrier) {
Label done;
__ jccb(Assembler::equal, done);
read_barrier(masm, op1);
read_barrier(masm, op2);
__ cmpptr(op1, op2);
__ bind(done);
}
}

void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register src1, Address src2) {
__ cmpptr(src1, src2);
if (ShenandoahAcmpBarrier) {
Label done;
__ jccb(Assembler::equal, done);
__ movptr(rscratch2, src2);
read_barrier(masm, src1);
read_barrier(masm, rscratch2);
__ cmpptr(src1, rscratch2);
__ bind(done);
}
}

void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
Register thread, Register obj,
Register var_size_in_bytes,
@ -562,28 +504,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
__ verify_tlab();
}

void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
bool is_write = (decorators & ACCESS_WRITE) != 0;
if (is_write) {
if (oop_not_null) {
write_barrier(masm, obj);
} else {
Label done;
__ testptr(obj, obj);
__ jcc(Assembler::zero, done);
write_barrier(masm, obj);
__ bind(done);
}
} else {
if (oop_not_null) {
read_barrier_not_null(masm, obj);
} else {
read_barrier(masm, obj);
}
}
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
#ifndef _LP64
@ -622,14 +542,14 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
// Step 2. CAS had failed. This may be a false negative.
//
// The trouble comes when we compare the to-space pointer with the from-space
// pointer to the same object. To resolve this, it will suffice to read both
// oldval and the value from memory through the read barriers -- this will give
// both to-space pointers. If they mismatch, then it was a legitimate failure.
// pointer to the same object. To resolve this, it will suffice to resolve both
// oldval and the value from memory -- this will give both to-space pointers.
// If they mismatch, then it was a legitimate failure.
//
if (UseCompressedOops) {
__ decode_heap_oop(tmp1);
}
read_barrier_impl(masm, tmp1);
resolve_forward_pointer(masm, tmp1);

if (UseCompressedOops) {
__ movl(tmp2, oldval);
@ -637,7 +557,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
} else {
__ movptr(tmp2, oldval);
}
read_barrier_impl(masm, tmp2);
resolve_forward_pointer(masm, tmp2);

__ cmpptr(tmp1, tmp2);
__ jcc(Assembler::notEqual, done, true);
@ -646,8 +566,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
//
// Corner case: it may happen that somebody stored the from-space pointer
// to memory while we were preparing for retry. Therefore, we can fail again
// on retry, and so need to do this in loop, always re-reading the failure
// witness through the read barrier.
// on retry, and so need to do this in loop, always resolving the failure
// witness.
__ bind(retry);
if (os::is_MP()) __ lock();
if (UseCompressedOops) {
@ -663,7 +583,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
} else {
__ movptr(tmp2, oldval);
}
read_barrier_impl(masm, tmp2);
resolve_forward_pointer(masm, tmp2);

__ cmpptr(tmp1, tmp2);
__ jcc(Assembler::equal, retry, true);
@ -811,7 +731,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen

}

void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
__ bind(*stub->entry());

Label done;
@ -828,7 +748,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
__ jcc(Assembler::zero, done);
}

write_barrier(ce->masm(), res);
load_reference_barrier_not_null(ce->masm(), res);

__ bind(done);
__ jmp(*stub->continuation());
@ -898,16 +818,16 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_wb() {
assert(_shenandoah_wb != NULL, "need write barrier stub");
return _shenandoah_wb;
address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
return _shenandoah_lrb;
}

#define __ cgen->assembler()->

address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
__ align(CodeEntryAlignment);
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();

#ifdef _LP64
@ -955,7 +875,7 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
__ push(r15);
save_vector_registers(cgen->assembler());
__ movptr(rdi, rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
restore_vector_registers(cgen->assembler());
__ pop(r15);
__ pop(r14);
@ -982,12 +902,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
if (ShenandoahLoadRefBarrier) {
int stub_code_size = 4096;
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
_shenandoah_wb = generate_shenandoah_wb(&cgen);
_shenandoah_lrb = generate_shenandoah_lrb(&cgen);
}
}
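The x86 cmpxchg_oop hunks above keep Shenandoah's false-negative-tolerant CAS and only rename its resolution step from read_barrier_impl to resolve_forward_pointer. As a rough illustration of the retry scheme described in the "Step 2" comments, here is a hedged, self-contained C++ sketch; ToyOop and resolve() are hypothetical stand-ins (resolve() plays the role of resolve_forward_pointer()), not HotSpot code.

// Conceptual sketch only: a CAS that treats "memory held a from-space alias of
// expected" as a false negative and retries instead of failing.
#include <atomic>

struct ToyOop {};
static ToyOop* resolve(ToyOop* p) { return p; } // toy: real code follows the forwarding pointer

static bool shenandoah_cas_oop(std::atomic<ToyOop*>& addr, ToyOop* expected, ToyOop* new_val) {
  ToyOop* witness = expected;
  if (addr.compare_exchange_strong(witness, new_val)) {
    return true;                                  // fast path: plain CAS worked
  }
  // CAS failed; 'witness' now holds what memory actually contained. If it
  // resolves to the same to-space object as 'expected', that was a false negative.
  while (resolve(witness) == resolve(expected)) {
    // Retry against the exact pointer currently in memory. Someone may store
    // another from-space alias meanwhile, so keep re-reading and resolving the
    // latest failure witness.
    if (addr.compare_exchange_strong(witness, new_val)) {
      return true;
    }
  }
  return false;                                   // legitimate failure
}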
@ -29,7 +29,7 @@
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahWriteBarrierStub;
class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif
@ -37,7 +37,7 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

static address _shenandoah_wb;
static address _shenandoah_lrb;

void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
@ -55,32 +55,30 @@ private:
bool tosca_live,
bool expand_call);

void read_barrier(MacroAssembler* masm, Register dst);
void read_barrier_impl(MacroAssembler* masm, Register dst);
void resolve_forward_pointer(MacroAssembler* masm, Register dst);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);

void read_barrier_not_null(MacroAssembler* masm, Register dst);
void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);

void write_barrier(MacroAssembler* masm, Register dst);
void write_barrier_impl(MacroAssembler* masm, Register dst);
void load_reference_barrier_not_null(MacroAssembler* masm, Register dst);

void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);

address generate_shenandoah_wb(StubCodeGenerator* cgen);
address generate_shenandoah_lrb(StubCodeGenerator* cgen);

void save_vector_registers(MacroAssembler* masm);
void restore_vector_registers(MacroAssembler* masm);

public:
static address shenandoah_wb();
static address shenandoah_lrb();

void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif

void load_reference_barrier(MacroAssembler* masm, Register dst);

void cmpxchg_oop(MacroAssembler* masm,
Register res, Address addr, Register oldval, Register newval,
bool exchange, Register tmp1, Register tmp2);
@ -93,16 +91,6 @@ public:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);

#ifndef _LP64
virtual void obj_equals(MacroAssembler* masm,
Address obj1, jobject obj2);
virtual void obj_equals(MacroAssembler* masm,
Register obj1, jobject obj2);
#endif

virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2);

virtual void tlab_allocate(MacroAssembler* masm,
Register thread, Register obj,
Register var_size_in_bytes,
@ -110,8 +98,6 @@ public:
Register t1, Register t2,
Label& slow_case);

virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);

virtual void barrier_stubs_init();

};
@ -107,6 +107,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
__ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);

if (access.is_oop()) {
result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
if (ShenandoahSATBBarrier) {
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
result /* pre_val */);
@ -23,47 +23,7 @@

source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
%}

instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{
match(Set dst (ShenandoahReadBarrier src));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0));
match(Set dst (ShenandoahReadBarrier (DecodeN src)));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
match(Set dst (ShenandoahReadBarrier (DecodeN src)));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
%}

instruct compareAndSwapP_shenandoah(rRegI res,
@ -777,8 +777,7 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true;
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN"))) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;

@ -3506,7 +3505,6 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
"ShenandoahReadBarrier",
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
@ -46,9 +46,9 @@ void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
bs->gen_pre_barrier_stub(ce, this);
|
||||
}
|
||||
|
||||
void ShenandoahWriteBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
bs->gen_write_barrier_stub(ce, this);
|
||||
bs->gen_load_reference_barrier_stub(ce, this);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
|
||||
@ -105,40 +105,16 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
|
||||
__ branch_destination(slow->continuation());
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
if (UseShenandoahGC && ShenandoahReadBarrier) {
|
||||
return read_barrier_impl(gen, obj, info, need_null_check);
|
||||
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
if (ShenandoahLoadRefBarrier) {
|
||||
return load_reference_barrier_impl(gen, obj, info, need_null_check);
|
||||
} else {
|
||||
return obj;
|
||||
}
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "Should be enabled");
|
||||
LabelObj* done = new LabelObj();
|
||||
LIR_Opr result = gen->new_register(T_OBJECT);
|
||||
__ move(obj, result);
|
||||
if (need_null_check) {
|
||||
__ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
|
||||
__ branch(lir_cond_equal, T_LONG, done->label());
|
||||
}
|
||||
LIR_Address* brooks_ptr_address = gen->generate_address(result, ShenandoahBrooksPointer::byte_offset(), T_ADDRESS);
|
||||
__ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
|
||||
|
||||
__ branch_destination(done->label());
|
||||
return result;
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
if (UseShenandoahGC && ShenandoahWriteBarrier) {
|
||||
return write_barrier_impl(gen, obj, info, need_null_check);
|
||||
} else {
|
||||
return obj;
|
||||
}
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
|
||||
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
|
||||
assert(ShenandoahLoadRefBarrier, "Should be enabled");
|
||||
|
||||
obj = ensure_in_register(gen, obj);
|
||||
assert(obj->is_register(), "must be a register at this point");
|
||||
@ -168,7 +144,7 @@ LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr ob
|
||||
}
|
||||
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
|
||||
|
||||
CodeStub* slow = new ShenandoahWriteBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
|
||||
CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
|
||||
__ branch(lir_cond_notEqual, T_INT, slow);
|
||||
__ branch_destination(slow->continuation());
|
||||
|
||||
@ -189,58 +165,13 @@ LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr ob
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
|
||||
bool need_null_check = (decorators & IS_NOT_NULL) == 0;
|
||||
if (ShenandoahStoreValEnqueueBarrier) {
|
||||
obj = write_barrier_impl(gen, obj, info, need_null_check);
|
||||
obj = ensure_in_register(gen, obj);
|
||||
pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
|
||||
}
|
||||
if (ShenandoahStoreValReadBarrier) {
|
||||
obj = read_barrier_impl(gen, obj, info, true /*need_null_check*/);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
|
||||
DecoratorSet decorators = access.decorators();
|
||||
bool is_array = (decorators & IS_ARRAY) != 0;
|
||||
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
|
||||
|
||||
bool is_write = (decorators & ACCESS_WRITE) != 0;
|
||||
bool needs_null_check = (decorators & IS_NOT_NULL) == 0;
|
||||
|
||||
LIR_Opr base = access.base().item().result();
|
||||
LIR_Opr offset = access.offset().opr();
|
||||
LIRGenerator* gen = access.gen();
|
||||
|
||||
if (is_write) {
|
||||
base = write_barrier(gen, base, access.access_emit_info(), needs_null_check);
|
||||
} else {
|
||||
base = read_barrier(gen, base, access.access_emit_info(), needs_null_check);
|
||||
}
|
||||
|
||||
LIR_Opr addr_opr;
|
||||
if (is_array) {
|
||||
addr_opr = LIR_OprFact::address(gen->emit_array_address(base, offset, access.type()));
|
||||
} else if (needs_patching) {
|
||||
// we need to patch the offset in the instruction so don't allow
|
||||
// generate_address to try to be smart about emitting the -1.
|
||||
// Otherwise the patching code won't know how to find the
|
||||
// instruction to patch.
|
||||
addr_opr = LIR_OprFact::address(new LIR_Address(base, PATCHED_ADDR, access.type()));
|
||||
} else {
|
||||
addr_opr = LIR_OprFact::address(gen->generate_address(base, offset, 0, 0, access.type()));
|
||||
}
|
||||
|
||||
if (resolve_in_register) {
|
||||
LIR_Opr resolved_addr = gen->new_pointer_register();
|
||||
__ leal(addr_opr, resolved_addr);
|
||||
resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
|
||||
return resolved_addr;
|
||||
} else {
|
||||
return addr_opr;
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
|
||||
if (access.is_oop()) {
|
||||
if (ShenandoahSATBBarrier) {
|
||||
@ -252,15 +183,28 @@ void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value)
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
|
||||
BarrierSetC1::load_at_resolved(access, result);
|
||||
if (!access.is_oop()) {
|
||||
BarrierSetC1::load_at_resolved(access, result);
|
||||
return;
|
||||
}
|
||||
|
||||
LIRGenerator *gen = access.gen();
|
||||
|
||||
if (ShenandoahLoadRefBarrier) {
|
||||
LIR_Opr tmp = gen->new_register(T_OBJECT);
|
||||
BarrierSetC1::load_at_resolved(access, tmp);
|
||||
tmp = load_reference_barrier(access.gen(), tmp, access.access_emit_info(), true);
|
||||
__ move(tmp, result);
|
||||
} else {
|
||||
BarrierSetC1::load_at_resolved(access, result);
|
||||
}
|
||||
|
||||
if (ShenandoahKeepAliveBarrier) {
|
||||
DecoratorSet decorators = access.decorators();
|
||||
bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
|
||||
bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
|
||||
LIRGenerator *gen = access.gen();
|
||||
if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
|
||||
if (is_weak || is_phantom || is_anonymous) {
|
||||
// Register the value in the referent field with the pre-barrier
|
||||
LabelObj *Lcont_anonymous;
|
||||
if (is_anonymous) {
|
||||
@ -276,19 +220,6 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result)
|
||||
}
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
|
||||
return BarrierSetC1::atomic_add_at_resolved(access, value);
|
||||
}
|
||||
|
||||
LIR_Opr ShenandoahBarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
|
||||
bool is_write = decorators & ACCESS_WRITE;
|
||||
if (is_write) {
|
||||
return write_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
|
||||
} else {
|
||||
return read_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
|
||||
}
|
||||
}
|
||||
|
||||
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
|
||||
virtual OopMapSet* generate_code(StubAssembler* sasm) {
|
||||
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
|
@ -85,7 +85,7 @@ public:
|
||||
#endif // PRODUCT
|
||||
};
|
||||
|
||||
class ShenandoahWriteBarrierStub: public CodeStub {
|
||||
class ShenandoahLoadReferenceBarrierStub: public CodeStub {
|
||||
friend class ShenandoahBarrierSetC1;
|
||||
private:
|
||||
LIR_Opr _obj;
|
||||
@ -94,7 +94,7 @@ private:
|
||||
bool _needs_null_check;
|
||||
|
||||
public:
|
||||
ShenandoahWriteBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
|
||||
ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
|
||||
_obj(obj), _result(result), _info(info), _needs_null_check(needs_null_check)
|
||||
{
|
||||
assert(_obj->is_register(), "should be register");
|
||||
@ -113,7 +113,7 @@ public:
|
||||
visitor->do_temp(_result);
|
||||
}
|
||||
#ifndef PRODUCT
|
||||
virtual void print_name(outputStream* out) const { out->print("ShenandoahWritePreBarrierStub"); }
|
||||
virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); }
|
||||
#endif // PRODUCT
|
||||
};
|
||||
|
||||
@ -181,12 +181,10 @@ private:
|
||||
|
||||
void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);
|
||||
|
||||
LIR_Opr read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
LIR_Opr write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);
|
||||
|
||||
LIR_Opr read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
LIR_Opr write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
|
||||
|
||||
LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj);
|
||||
|
||||
@ -194,7 +192,6 @@ public:
|
||||
CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }
|
||||
|
||||
protected:
|
||||
virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
|
||||
|
||||
virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
|
||||
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
|
||||
@ -202,10 +199,8 @@ protected:
|
||||
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
|
||||
|
||||
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
|
||||
virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value);
|
||||
|
||||
public:
|
||||
virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);
|
||||
|
||||
virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);
|
||||
};
|
||||
|
@ -43,121 +43,56 @@ ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
|
||||
}
|
||||
|
||||
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
|
||||
: _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
|
||||
: _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
|
||||
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
|
||||
}
|
||||
|
||||
int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
|
||||
return _shenandoah_barriers->length();
|
||||
int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
|
||||
return _enqueue_barriers->length();
|
||||
}
|
||||
|
||||
ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
|
||||
return _shenandoah_barriers->at(idx);
ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
return _enqueue_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
_shenandoah_barriers->append(n);
void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
_enqueue_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
if (_shenandoah_barriers->contains(n)) {
_shenandoah_barriers->remove(n);
void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
if (_enqueue_barriers->contains(n)) {
_enqueue_barriers->remove(n);
}
}

#define __ kit->
int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
return _load_reference_barriers->length();
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahReadBarrier) {
obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
_load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
if (_load_reference_barriers->contains(n)) {
_load_reference_barriers->remove(n);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahStoreValEnqueueBarrier) {
obj = shenandoah_write_barrier(kit, obj);
obj = shenandoah_enqueue_barrier(kit, obj);
}
if (ShenandoahStoreValReadBarrier) {
obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
const Type* obj_type = obj->bottom_type();
if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
return obj;
}
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();

if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
// We know it is null, no barrier needed.
return obj;
}

if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {

// We don't know if it's null or not. Need null-check.
enum { _not_null_path = 1, _null_path, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
Node* phi = new PhiNode(region, obj_type);
Node* null_ctrl = __ top();
Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);

region->init_req(_null_path, null_ctrl);
phi ->init_req(_null_path, __ zerocon(T_OBJECT));

Node* ctrl = use_ctrl ? __ control() : NULL;
ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
Node* n = __ gvn().transform(rb);

region->init_req(_not_null_path, __ control());
phi ->init_req(_not_null_path, n);

__ set_control(__ gvn().transform(region));
__ record_for_igvn(region);
return __ gvn().transform(phi);

} else {
// We know it is not null. Simple barrier is sufficient.
Node* ctrl = use_ctrl ? __ control() : NULL;
ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
Node* n = __ gvn().transform(rb);
__ record_for_igvn(n);
return n;
}
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
Node* n = __ gvn().transform(wb);
if (n == wb) { // New barrier needs memory projection.
Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
__ set_memory(proj, adr_type);
}
return n;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahWriteBarrier) {
obj = shenandoah_write_barrier_impl(kit, obj);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
return obj;
}
const Type* obj_type = obj->bottom_type();
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
__ record_for_igvn(n);
return n;
}
#define __ kit->

bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
BasicType bt, uint adr_idx) const {
@ -304,7 +239,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");
assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

// if (!marking)
__ if_then(marking, BoolTest::ne, zero, unlikely); {
@ -361,7 +296,7 @@ bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {

bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
return call->is_CallLeaf() &&
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT);
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
@ -549,88 +484,6 @@ const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
return TypeFunc::make(domain, range);
}

void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const {
const TypePtr* adr_type = access.addr().type();

if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) {
int off = adr_type->is_ptr()->offset();
int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() :
arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type());
assert(off != Type::OffsetTop, "unexpected offset");
if (off == Type::OffsetBot || off >= base_off) {
DecoratorSet decorators = access.decorators();
bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
GraphKit* kit = NULL;
if (access.is_parse_access()) {
C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
kit = parse_access.kit();
}
Node* adr = access.addr().node();
assert(adr->is_AddP(), "unexpected address shape");
Node* base = adr->in(AddPNode::Base);

if (is_write) {
if (kit != NULL) {
base = shenandoah_write_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone");
}
} else {
if (adr_type->isa_instptr()) {
Compile* C = access.gvn().C;
ciField* field = C->alias_type(adr_type)->field();

// Insert read barrier for Shenandoah.
if (field != NULL &&
((ShenandoahOptimizeStaticFinals && field->is_static() && field->is_final()) ||
(ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
(ShenandoahOptimizeStableFinals && field->is_stable()))) {
// Skip the barrier for special fields
} else {
if (kit != NULL) {
base = shenandoah_read_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
}
}
} else {
if (kit != NULL) {
base = shenandoah_read_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
}
}
}
if (base != adr->in(AddPNode::Base)) {
assert(kit != NULL, "no barrier should have been added");

Node* address = adr->in(AddPNode::Address);

if (address->is_AddP()) {
assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape");
assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape");
assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape");
address = address->clone();
address->set_req(AddPNode::Base, base);
address->set_req(AddPNode::Address, base);
address = kit->gvn().transform(address);
} else {
assert(address == adr->in(AddPNode::Base), "unexpected address shape");
address = base;
}
adr = adr->clone();
adr->set_req(AddPNode::Base, base);
adr->set_req(AddPNode::Address, address);
adr = kit->gvn().transform(adr);
access.addr().set_node(adr);
}
}
}
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
DecoratorSet decorators = access.decorators();

@ -662,44 +515,8 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
PhaseGVN& gvn = opt_access.gvn();
MergeMemNode* mm = opt_access.mem();

if (ShenandoahStoreValReadBarrier) {
RegionNode* region = new RegionNode(3);
const Type* v_t = gvn.type(val.node());
Node* phi = new PhiNode(region, v_t->isa_oopptr() ? v_t->is_oopptr()->cast_to_nonconst() : v_t);
Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT)));
Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne));
IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);

gvn.transform(iff);
if (gvn.is_IterGVN()) {
gvn.is_IterGVN()->_worklist.push(iff);
} else {
gvn.record_for_igvn(iff);
}

Node* null_true = gvn.transform(new IfFalseNode(iff));
Node* null_false = gvn.transform(new IfTrueNode(iff));
region->init_req(1, null_true);
region->init_req(2, null_false);
phi->init_req(1, gvn.zerocon(T_OBJECT));
Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL));
cast->set_req(0, null_false);
cast = gvn.transform(cast);
Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false));
phi->init_req(2, rb);
opt_access.set_ctl(gvn.transform(region));
val.set_node(gvn.transform(phi));
}
if (ShenandoahStoreValEnqueueBarrier) {
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node()));
int alias = gvn.C->get_alias_index(adr_type);
Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node());
Node* wb_transformed = gvn.transform(wb);
Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed));
if (wb_transformed == wb) {
Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb));
mm->set_memory_at(alias, proj);
}
Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
val.set_node(enqueue);
}
}
@ -724,6 +541,17 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
Node* load = BarrierSetC2::load_at_resolved(access, val_type);

if (access.is_oop()) {
if (ShenandoahLoadRefBarrier) {
load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
if (access.is_parse_access()) {
load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
} else {
load = static_cast<C2OptAccess &>(access).gvn().transform(load);
}
}
}
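For orientation, here is a standalone sketch of what the barrier attached to the loaded oop conceptually does at runtime; the struct and function names below are invented for the illustration and are not part of this change.

// Illustration only: toy model of a load-reference barrier, not HotSpot code.
#include <cstdio>

struct Obj {
  Obj* fwd;     // forwarding pointer; points to self while not evacuated
  int  payload;
};

// Conceptual effect of the expanded barrier: follow the forwarding pointer so
// the mutator only ever works on the to-space copy of an evacuated object.
static Obj* load_reference_barrier(Obj* v) {
  return (v == nullptr) ? nullptr : v->fwd;
}

int main() {
  Obj to_space{nullptr, 42};
  to_space.fwd = &to_space;           // to-space copy forwards to itself
  Obj from_space{&to_space, 17};      // evacuated object forwards to the copy

  Obj* loaded = &from_space;          // value just loaded from a heap field
  std::printf("payload after barrier: %d\n", load_reference_barrier(loaded)->payload); // 42
  return 0;
}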

// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
// then, if SATB is enabled, we need to record the referent in an
@ -797,9 +625,10 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess

#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@ -867,6 +696,7 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
}
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
if (access.is_oop()) {
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
result /* pre_val */, T_OBJECT);
@ -876,19 +706,9 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
assert(!src->is_AddP(), "unexpected input");
src = shenandoah_read_barrier(kit, src);
BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const {
bool is_write = decorators & ACCESS_WRITE;
if (is_write) {
return shenandoah_write_barrier(kit, n);
} else {
return shenandoah_read_barrier(kit, n);
}
}

Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@ -915,6 +735,7 @@ Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl,

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
return false;
}
@ -929,26 +750,30 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
return ShenandoahBarrierNode::skip_through_barrier(c);
if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
}
if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
c = c->in(1);
}
return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
return !ShenandoahWriteBarrierNode::expand(C, igvn);
return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
if (mode == LoopOptsShenandoahExpand) {
assert(UseShenandoahGC, "only for shenandoah");
ShenandoahWriteBarrierNode::pin_and_expand(phase);
ShenandoahBarrierC2Support::pin_and_expand(phase);
return true;
} else if (mode == LoopOptsShenandoahPostExpand) {
assert(UseShenandoahGC, "only for shenandoah");
visited.Clear();
ShenandoahWriteBarrierNode::optimize_after_expansion(visited, nstack, worklist, phase);
ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
return true;
}
GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
ShenandoahWriteBarrierNode::optimize_before_expansion(phase, memory_graph_fixers, false);
return false;
}

@ -957,7 +782,6 @@ bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_couple
if (!is_oop) {
return false;
}

if (tightly_coupled_alloc) {
if (phase == Optimization) {
return false;
@ -985,7 +809,7 @@ bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIte
}
} else {
return true;
}
}
} else if (src_type->isa_aryptr()) {
BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
@ -1038,14 +862,20 @@ void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node*

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahWriteBarrier) {
state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
}
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahWriteBarrier) {
state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
}
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}

@ -1091,19 +921,18 @@ void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &use
}
}
}
for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
if (!useful.member(n)) {
state()->remove_shenandoah_barrier(n);
state()->remove_enqueue_barrier(n);
}
}
for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
if (!useful.member(n)) {
state()->remove_load_reference_barrier(n);
}
}

}
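A minimal sketch of the bookkeeping pattern used above, with invented types: barrier nodes are recorded when created and dropped again once proven useless, so expansion only visits live ones. This is illustrative only, not the GrowableArray-based VM code.

// Illustration only: toy stand-in for the per-compilation barrier registry.
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

struct BarrierNode { int id; };

class BarrierRegistry {
  std::vector<BarrierNode*> _load_reference_barriers;
public:
  void add(BarrierNode* n) {
    // mirrors the "duplicate entry in barrier list" assert above
    assert(std::find(_load_reference_barriers.begin(),
                     _load_reference_barriers.end(), n) ==
           _load_reference_barriers.end());
    _load_reference_barriers.push_back(n);
  }
  void remove(BarrierNode* n) {
    _load_reference_barriers.erase(
        std::remove(_load_reference_barriers.begin(),
                    _load_reference_barriers.end(), n),
        _load_reference_barriers.end());
  }
  std::size_t count() const { return _load_reference_barriers.size(); }
};

int main() {
  BarrierNode a{1}, b{2};
  BarrierRegistry state;
  state.add(&a);
  state.add(&b);
  state.remove(&a);                                     // node proved useless
  std::printf("live barriers: %zu\n", state.count());   // 1
  return 0;
}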

bool ShenandoahBarrierSetC2::has_special_unique_user(const Node* node) const {
assert(node->outcnt() == 1, "match only for unique out");
Node* n = node->unique_out();
return node->Opcode() == Op_ShenandoahWriteBarrier && n->Opcode() == Op_ShenandoahWBMemProj;
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
@ -1123,7 +952,7 @@ bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
ShenandoahBarrierNode::verify(Compile::current()->root());
ShenandoahBarrierC2Support::verify(Compile::current()->root());
} else if (phase == BarrierSetC2::BeforeCodeGen) {
// Verify G1 pre-barriers
const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
@ -1229,7 +1058,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
}
} else if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahWriteBarrierNode::is_heap_stable_test(n) &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL) {
Node* dom = n->in(0);
Node* prev_dom = n;
@ -1237,7 +1066,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
int dist = 16;
// Search up the dominator tree for another heap stable test
while (dom->Opcode() != op || // Not same opcode?
!ShenandoahWriteBarrierNode::is_heap_stable_test(dom) || // Not same input 1?
!ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;

@ -1258,46 +1087,6 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
return NULL;
}

Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const {
if (n->is_Load()) {
Node *mem = n->in(MemNode::Memory);
Node *value = n->as_Load()->can_see_stored_value(mem, phase);
if (value) {
PhaseIterGVN *igvn = phase->is_IterGVN();
if (igvn != NULL &&
value->is_Phi() &&
value->req() > 2 &&
value->in(1) != NULL &&
value->in(1)->is_ShenandoahBarrier()) {
if (igvn->_worklist.member(value) ||
igvn->_worklist.member(value->in(0)) ||
(value->in(0)->in(1) != NULL &&
value->in(0)->in(1)->is_IfProj() &&
(igvn->_worklist.member(value->in(0)->in(1)) ||
(value->in(0)->in(1)->in(0) != NULL &&
igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
igvn->_worklist.push(n);
return n;
}
}
// (This works even when value is a Con, but LoadNode::Value
// usually runs first, producing the singleton type of the Con.)
Node *value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
if (value->Opcode() == Op_EncodeP) {
if (value_no_barrier != value->in(1)) {
Node *encode = value->clone();
encode->set_req(1, value_no_barrier);
encode = phase->transform(encode);
return encode;
}
} else {
return value_no_barrier;
}
}
}
return n;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* u = n->fast_out(i);
@ -1308,20 +1097,6 @@ bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const {
int offset = adr_type->offset();
if (offset == ShenandoahBrooksPointer::byte_offset()) {
if (adr_type->isa_aryptr()) {
adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset);
} else if (adr_type->isa_instptr()) {
adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset);
}
return true;
} else {
return false;
}
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
switch (opcode) {
case Op_CallLeaf:
@ -1356,9 +1131,7 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
#endif
return true;
case Op_ShenandoahReadBarrier:
return true;
case Op_ShenandoahWriteBarrier:
case Op_ShenandoahLoadReferenceBarrier:
assert(false, "should have been expanded already");
return true;
default:
@ -1366,17 +1139,6 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
}

#ifdef ASSERT
bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const {
if (offset == ShenandoahBrooksPointer::byte_offset() &&
(adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) {
return true;
} else {
return false;
}
}
#endif

bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
switch (opcode) {
case Op_ShenandoahCompareAndExchangeP:
@ -1412,15 +1174,12 @@ bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph
}
return false;
}
case Op_ShenandoahReadBarrier:
case Op_ShenandoahWriteBarrier:
// Barriers 'pass through' its arguments. I.e. what goes in, comes out.
// It doesn't escape.
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist);
break;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
break;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
return true;
default:
// Nothing
break;
@ -1441,15 +1200,12 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahWeakCompareAndSwapN:
return conn_graph->add_final_edges_unsafe_access(n, opcode);
case Op_ShenandoahReadBarrier:
case Op_ShenandoahWriteBarrier:
// Barriers 'pass through' its arguments. I.e. what goes in, comes out.
// It doesn't escape.
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL);
return true;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
return true;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
return true;
default:
// Nothing
break;
@ -1464,21 +1220,7 @@ bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
}

bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
return n->is_ShenandoahBarrier();
}

bool ShenandoahBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
switch (opcode) {
case Op_ShenandoahReadBarrier:
if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) {
matcher->set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1));
}
matcher->set_shared(n);
return true;
default:
break;
}
return false;
return n->Opcode() == Op_ShenandoahLoadReferenceBarrier;
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
@ -1510,62 +1252,3 @@ bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) co
xop == Op_ShenandoahCompareAndSwapN ||
xop == Op_ShenandoahCompareAndSwapP;
}

void ShenandoahBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {
if (use->is_ShenandoahBarrier()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
Node* cmp = use->find_out_with(Op_CmpP);
if (u->Opcode() == Op_CmpP) {
igvn->_worklist.push(cmp);
}
}
}
}

void ShenandoahBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {
if (use->is_ShenandoahBarrier()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* p = use->fast_out(i2);
if (p->Opcode() == Op_AddP) {
for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
Node* q = p->fast_out(i3);
if (q->is_Load()) {
if(q->bottom_type() != ccp->type(q)) {
worklist.push(q);
}
}
}
}
}
}
}

Node* ShenandoahBarrierSetC2::split_if_pre(PhaseIdealLoop* phase, Node* n) const {
if (n->Opcode() == Op_ShenandoahReadBarrier) {
((ShenandoahReadBarrierNode*)n)->try_move(phase);
} else if (n->Opcode() == Op_ShenandoahWriteBarrier) {
return ((ShenandoahWriteBarrierNode*)n)->try_split_thru_phi(phase);
}

return NULL;
}

bool ShenandoahBarrierSetC2::build_loop_late_post(PhaseIdealLoop* phase, Node* n) const {
return ShenandoahBarrierNode::build_loop_late_post(phase, n);
}

bool ShenandoahBarrierSetC2::sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const {
if (n->is_ShenandoahBarrier()) {
return x->as_ShenandoahBarrier()->sink_node(phase, x_ctrl, n_ctrl);
}
if (n->is_MergeMem()) {
// PhaseIdealLoop::split_if_with_blocks_post() would:
// _igvn._worklist.yank(x);
// which sometimes causes chains of MergeMem which some of
// shenandoah specific code doesn't support
phase->register_new_node(x, x_ctrl);
return true;
}
return false;
}

@ -30,14 +30,21 @@

class ShenandoahBarrierSetC2State : public ResourceObj {
private:
GrowableArray<ShenandoahWriteBarrierNode*>* _shenandoah_barriers;
GrowableArray<ShenandoahEnqueueBarrierNode*>* _enqueue_barriers;
GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers;

public:
ShenandoahBarrierSetC2State(Arena* comp_arena);
int shenandoah_barriers_count() const;
ShenandoahWriteBarrierNode* shenandoah_barrier(int idx) const;
void add_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
void remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n);

int enqueue_barriers_count() const;
ShenandoahEnqueueBarrierNode* enqueue_barrier(int idx) const;
void add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n);
void remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n);

int load_reference_barriers_count() const;
ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const;
void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n);
void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n);
};

class ShenandoahBarrierSetC2 : public BarrierSetC2 {
@ -66,12 +73,7 @@ private:
BasicType bt) const;

Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const;
Node* shenandoah_read_barrier(GraphKit* kit, Node* obj) const;
Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const;
Node* shenandoah_write_barrier(GraphKit* kit, Node* obj) const;
Node* shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const;
Node* shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const;
Node* shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const;

void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
Node* pre_val, bool need_mem_bar) const;
@ -79,7 +81,6 @@ private:
static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn);

protected:
virtual void resolve_address(C2Access& access) const;
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
@ -102,12 +103,11 @@ public:
static const TypeFunc* write_ref_field_pre_entry_Type();
static const TypeFunc* shenandoah_clone_barrier_Type();
static const TypeFunc* shenandoah_write_barrier_Type();
virtual bool has_load_barriers() const { return true; }

// This is the entry-point for the backend to perform accesses through the Access API.
virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;

virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const;

virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@ -144,13 +144,7 @@ public:
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
#endif

virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const;
#ifdef ASSERT
virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const;
#endif

virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const;
virtual Node* identity_node(PhaseGVN* phase, Node* n) const;
virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;

virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
@ -158,17 +152,8 @@ public:
virtual bool escape_has_out_with_unsafe_object(Node* n) const;
virtual bool escape_is_barrier_node(Node* n) const;

virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const;

virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const;
virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const;

virtual bool has_special_unique_user(const Node* node) const;
virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const;
virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const;
virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const;
};

#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
File diff suppressed because it is too large
@ -36,10 +36,8 @@
class PhaseGVN;
class MemoryGraphFixer;

class ShenandoahBarrierNode : public TypeNode {
class ShenandoahBarrierC2Support : public AllStatic {
private:
bool _allow_fromspace;

#ifdef ASSERT
enum verify_type {
ShenandoahLoad,
@ -50,204 +48,49 @@ private:
};

static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
#endif

public:
enum { Control,
Memory,
ValueIn
};

ShenandoahBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
: TypeNode(obj->bottom_type()->isa_oopptr() ? obj->bottom_type()->is_oopptr()->cast_to_nonconst() : obj->bottom_type(), 3),
_allow_fromspace(allow_fromspace) {

init_req(Control, ctrl);
init_req(Memory, mem);
init_req(ValueIn, obj);

init_class_id(Class_ShenandoahBarrier);
}

static Node* skip_through_barrier(Node* n);

static const TypeOopPtr* brooks_pointer_type(const Type* t) {
return t->is_oopptr()->cast_to_nonconst()->add_offset(ShenandoahBrooksPointer::byte_offset())->is_oopptr();
}

virtual const TypePtr* adr_type() const {
if (bottom_type() == Type::TOP) {
return NULL;
}
//const TypePtr* adr_type = in(MemNode::Address)->bottom_type()->is_ptr();
const TypePtr* adr_type = brooks_pointer_type(bottom_type());
assert(adr_type->offset() == ShenandoahBrooksPointer::byte_offset(), "sane offset");
assert(Compile::current()->alias_type(adr_type)->is_rewritable(), "brooks ptr must be rewritable");
return adr_type;
}

virtual uint ideal_reg() const { return Op_RegP; }
virtual uint match_edge(uint idx) const {
return idx >= ValueIn;
}

Node* Identity_impl(PhaseGVN* phase);

virtual const Type* Value(PhaseGVN* phase) const;
virtual bool depends_only_on_test() const {
return true;
};

static bool needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace);

#ifdef ASSERT
static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
static void verify(RootNode* root);
static void verify_raw_mem(RootNode* root);
#endif
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif

// protected:
static Node* dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
static bool is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase);
static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
static bool build_loop_late_post(PhaseIdealLoop* phase, Node* n);
bool sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl);

protected:
uint hash() const;
bool cmp(const Node& n) const;
uint size_of() const;

private:
static bool needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited);

static bool dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
static bool dominates_memory_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
};

class ShenandoahReadBarrierNode : public ShenandoahBarrierNode {
public:
ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj)
: ShenandoahBarrierNode(ctrl, mem, obj, true) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
"should be enabled");
}
ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
: ShenandoahBarrierNode(ctrl, mem, obj, allow_fromspace) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
"should be enabled");
}

virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node* Identity(PhaseGVN* phase);
virtual int Opcode() const;

bool is_independent(Node* mem);

void try_move(PhaseIdealLoop* phase);

private:
static bool is_independent(const Type* in_type, const Type* this_type);
static bool dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
static bool dominates_memory_rb_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
};

class ShenandoahWriteBarrierNode : public ShenandoahBarrierNode {
public:
ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj);

virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node* Identity(PhaseGVN* phase);
virtual bool depends_only_on_test() const { return false; }

static bool expand(Compile* C, PhaseIterGVN& igvn);
static bool is_gc_state_load(Node *n);
static bool is_heap_state_test(Node* iff, int mask);
static bool is_heap_stable_test(Node* iff);
static bool try_common_gc_state_load(Node *n, PhaseIdealLoop *phase);
static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase);

static LoopNode* try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase);
static Node* move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase);
#ifdef ASSERT
static bool memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
static void memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase);
#endif
void try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
void try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
static void pin_and_expand(PhaseIdealLoop* phase);
CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
void pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses);
void pin_and_expand_helper(PhaseIdealLoop* phase);
static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase);
static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase);
static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);

static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase);
static void call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
Node* raw_mem, Node* wb_mem, int alias,
PhaseIdealLoop* phase);
static void call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase);
static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase);
static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses,
PhaseIdealLoop* phase);
static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);

static void optimize_after_expansion(VectorSet &visited, Node_Stack &nstack, Node_List &old_new, PhaseIdealLoop* phase);
static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);

static void optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm);
Node* would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase);
static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase);

Node* try_split_thru_phi(PhaseIdealLoop* phase);
};

class ShenandoahWBMemProjNode : public Node {
public:
enum { Control,
WriteBarrier };
static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase);
static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);

ShenandoahWBMemProjNode(Node *src) : Node(NULL, src) {
assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
assert(src->Opcode() == Op_ShenandoahWriteBarrier || src->is_Mach(), "epxect wb");
}
virtual Node* Identity(PhaseGVN* phase);
static bool is_gc_state_load(Node* n);
static bool is_heap_stable_test(Node* iff);

virtual int Opcode() const;
virtual bool is_CFG() const { return false; }
virtual const Type *bottom_type() const {return Type::MEMORY;}
virtual const TypePtr *adr_type() const {
Node* wb = in(WriteBarrier);
if (wb == NULL || wb->is_top()) return NULL; // node is dead
assert(wb->Opcode() == Op_ShenandoahWriteBarrier || (wb->is_Mach() && wb->as_Mach()->ideal_Opcode() == Op_ShenandoahWriteBarrier) || wb->is_Phi(), "expect wb");
return ShenandoahBarrierNode::brooks_pointer_type(wb->bottom_type());
}
static bool expand(Compile* C, PhaseIterGVN& igvn);
static void pin_and_expand(PhaseIdealLoop* phase);
static void optimize_after_expansion(VectorSet& visited, Node_Stack& nstack, Node_List& old_new, PhaseIdealLoop* phase);

virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
virtual const Type *Value(PhaseGVN* phase ) const {
return bottom_type();
}
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {};
#ifdef ASSERT
static void verify(RootNode* root);
#endif
};

class ShenandoahEnqueueBarrierNode : public Node {
public:
ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
}
ShenandoahEnqueueBarrierNode(Node* val);

const Type *bottom_type() const;
const Type* Value(PhaseGVN* phase) const;
@ -289,7 +132,6 @@ public:
Node* find_mem(Node* ctrl, Node* n) const;
void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses);
int alias() const { return _alias; }
void remove(Node* n);
};

class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode {
@ -382,4 +224,41 @@ public:
virtual int Opcode() const;
};

class ShenandoahLoadReferenceBarrierNode : public Node {
public:
enum {
Control,
ValueIn
};

enum Strength {
NONE, WEAK, STRONG, NA
};

ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val);

virtual int Opcode() const;
virtual const Type* bottom_type() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual const class TypePtr *adr_type() const { return TypeOopPtr::BOTTOM; }
virtual uint match_edge(uint idx) const {
return idx >= ValueIn;
}
virtual uint ideal_reg() const { return Op_RegP; }

virtual Node* Identity(PhaseGVN* phase);

uint size_of() const {
return sizeof(*this);
}

Strength get_barrier_strength();
CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);

private:
bool needs_barrier(PhaseGVN* phase, Node* n);
bool needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited);
};

#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP
@ -41,13 +41,10 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);

// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}

@ -47,13 +47,10 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeu
}

// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}

@ -42,13 +42,10 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristic
SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 10);

// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}

@ -43,14 +43,11 @@ ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics() : ShenandoahHeuristic
}

// Disable known barriers by default.
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahLoadRefBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);

// Final configuration checks

@ -40,13 +40,10 @@ ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics(
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);

// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}

@ -37,7 +37,6 @@ ShenandoahTraversalHeuristics::ShenandoahTraversalHeuristics() : ShenandoahHeuri
_last_cset_select(0)
{
FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false);
@ -53,11 +52,9 @@ ShenandoahTraversalHeuristics::ShenandoahTraversalHeuristics() : ShenandoahHeuri
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);

// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValEnqueueBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}

@ -46,12 +46,8 @@ void ShenandoahArguments::initialize() {

FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
FLAG_SET_DEFAULT(ShenandoahWriteBarrier, false);
FLAG_SET_DEFAULT(ShenandoahReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahCASBarrier, false);
FLAG_SET_DEFAULT(ShenandoahAcmpBarrier, false);
FLAG_SET_DEFAULT(ShenandoahCloneBarrier, false);
#endif

@ -111,12 +107,8 @@ void ShenandoahArguments::initialize() {
if (ShenandoahVerifyOptoBarriers &&
(!FLAG_IS_DEFAULT(ShenandoahSATBBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahKeepAliveBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahWriteBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahReadBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahStoreValReadBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahCASBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahAcmpBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahCloneBarrier)
)) {
warning("Unusual barrier configuration, disabling C2 barrier verification");
@ -164,13 +156,6 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(UseAOT, false);
}

// JNI fast get field stuff is not currently supported by Shenandoah.
// It would introduce another heap memory access for reading the forwarding
// pointer, which would have to be guarded by the signal handler machinery.
// See:
// http://mail.openjdk.java.net/pipermail/hotspot-dev/2018-June/032763.html
FLAG_SET_DEFAULT(UseFastJNIAccessors, false);

// TLAB sizing policy makes resizing decisions before each GC cycle. It averages
// historical data, assigning more recent data the weight according to TLABAllocationWeight.
// Current default is good for generational collectors that run frequent young GCs.

@ -218,31 +218,25 @@ void ShenandoahBarrierSet::write_region(MemRegion mr) {
}
}

oop ShenandoahBarrierSet::read_barrier(oop src) {
// Check for forwarded objects, because on Full GC path we might deal with
// non-trivial fwdptrs that contain Full GC specific metadata. We could check
// for is_full_gc_in_progress(), but this also covers the case of stable heap,
// which provides a bit of performance improvement.
if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
return ShenandoahBarrierSet::resolve_forwarded(src);
oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
return load_reference_barrier_impl(obj);
} else {
return src;
return obj;
}
}

bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
bool eq = oopDesc::equals_raw(obj1, obj2);
if (! eq && ShenandoahAcmpBarrier) {
OrderAccess::loadload();
obj1 = resolve_forwarded(obj1);
obj2 = resolve_forwarded(obj2);
eq = oopDesc::equals_raw(obj1, obj2);
oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
if (obj != NULL) {
return load_reference_barrier_not_null(obj);
} else {
return obj;
}
return eq;
}
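As a rough standalone model of the two runtime entry points above (invented names and heap layout, not the VM sources): the null-tolerant wrapper delegates to the not-null path, which only resolves forwardees while the heap actually has forwarded objects.

// Illustration only: toy model of the runtime load-reference barrier shape.
#include <cstdio>

struct Oop  { Oop* fwd; };
struct Heap { bool has_forwarded_objects; };

static Heap g_heap{true};

static Oop* load_reference_barrier_not_null(Oop* obj) {
  if (!g_heap.has_forwarded_objects) return obj;  // fast path: stable heap
  return obj->fwd;                                // stand-in for the slow path
}

static Oop* load_reference_barrier(Oop* obj) {
  return obj == nullptr ? nullptr : load_reference_barrier_not_null(obj);
}

int main() {
  Oop a{&a};
  std::printf("resolved: %p\n", (void*) load_reference_barrier(&a));
  std::printf("null stays null: %p\n", (void*) load_reference_barrier(nullptr));
  return 0;
}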

oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");

oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
assert(ShenandoahLoadRefBarrier, "should be enabled");
assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
shenandoah_assert_in_cset(NULL, obj);

@ -288,8 +282,8 @@ oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
return fwd;
}

oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
assert(ShenandoahLoadRefBarrier, "should be enabled");
if (!CompressedOops::is_null(obj)) {
bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
oop fwd = resolve_forwarded_not_null(obj);
@ -311,23 +305,10 @@ oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
}
}

oop ShenandoahBarrierSet::write_barrier(oop obj) {
if (ShenandoahWriteBarrier && _heap->has_forwarded_objects()) {
return write_barrier_impl(obj);
} else {
return obj;
}
}

oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
void ShenandoahBarrierSet::storeval_barrier(oop obj) {
if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
obj = write_barrier(obj);
enqueue(obj);
}
if (ShenandoahStoreValReadBarrier) {
obj = resolve_forwarded(obj);
}
return obj;
}
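The store-value barrier above reduces to enqueueing the stored reference while a traversal cycle is running; the toy version below (invented names, std::queue instead of the SATB queues) shows just that shape.

// Illustration only: toy model of the store-value enqueue barrier.
#include <cstdio>
#include <queue>

struct Oop { int id; };

static bool traversal_in_progress = true;
static std::queue<Oop*> mark_queue;

static void storeval_barrier(Oop* val) {
  if (traversal_in_progress && val != nullptr) {
    mark_queue.push(val);   // corresponds to enqueue(obj) above
  }
}

int main() {
  Oop o{1};
  storeval_barrier(&o);      // runs alongside the actual reference store
  storeval_barrier(nullptr); // nulls are never enqueued
  std::printf("queued: %zu\n", mark_queue.size());  // 1
  return 0;
}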

void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {

@ -87,24 +87,15 @@ public:
virtual void on_thread_attach(Thread* thread);
virtual void on_thread_detach(Thread* thread);

virtual oop read_barrier(oop src);

static inline oop resolve_forwarded_not_null(oop p);
static inline oop resolve_forwarded(oop p);

virtual oop write_barrier(oop obj);
void storeval_barrier(oop obj);
void keep_alive_barrier(oop obj);

oop write_barrier_mutator(oop obj);

virtual oop storeval_barrier(oop obj);

virtual void keep_alive_barrier(oop obj);

bool obj_equals(oop obj1, oop obj2);

#ifdef CHECK_UNHANDLED_OOPS
bool oop_equals_operator_allowed() { return !ShenandoahVerifyObjectEquals; }
#endif
oop load_reference_barrier(oop obj);
oop load_reference_barrier_mutator(oop obj);
oop load_reference_barrier_not_null(oop obj);

void enqueue(oop obj);

@ -114,7 +105,7 @@ private:
template <class T, bool STOREVAL_WRITE_BARRIER>
void write_ref_array_loop(HeapWord* start, size_t count);

oop write_barrier_impl(oop obj);
oop load_reference_barrier_impl(oop obj);

static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
@ -149,114 +140,31 @@ public:
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

template <typename T>
static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);

template <typename T>
static oop oop_atomic_xchg_in_heap_impl(oop new_value, T* addr);

public:
// Primitive heap accesses. These accessors get resolved when
// IN_HEAP is set (e.g. when using the HeapAccess API), it is
// not an oop_* overload, and the barrier strength is AS_NORMAL.
template <typename T>
static T load_in_heap(T* addr) {
ShouldNotReachHere();
return Raw::template load<T>(addr);
}

template <typename T>
static T load_in_heap_at(oop base, ptrdiff_t offset) {
base = ShenandoahBarrierSet::resolve_forwarded(base);
return Raw::template load_at<T>(base, offset);
}

template <typename T>
static void store_in_heap(T* addr, T value) {
ShouldNotReachHere();
Raw::store(addr, value);
}

template <typename T>
static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
Raw::store_at(base, offset, value);
}

template <typename T>
static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
ShouldNotReachHere();
return Raw::atomic_cmpxchg(new_value, addr, compare_value);
}

template <typename T>
static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
}

template <typename T>
static T atomic_xchg_in_heap(T new_value, T* addr) {
ShouldNotReachHere();
return Raw::atomic_xchg(new_value, addr);
}

template <typename T>
static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
|
||||
return Raw::atomic_xchg_at(new_value, base, offset);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length);
|
||||
|
||||
// Heap oop accesses. These accessors get resolved when
|
||||
// IN_HEAP is set (e.g. when using the HeapAccess API), it is
|
||||
// an oop_* overload, and the barrier strength is AS_NORMAL.
|
||||
template <typename T>
|
||||
static oop oop_load_in_heap(T* addr) {
|
||||
// ShouldNotReachHere();
|
||||
oop value = Raw::template oop_load<oop>(addr);
|
||||
keep_alive_if_weak(decorators, value);
|
||||
return value;
|
||||
}
|
||||
|
||||
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
|
||||
base = ShenandoahBarrierSet::resolve_forwarded(base);
|
||||
oop value = Raw::template oop_load_at<oop>(base, offset);
|
||||
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
|
||||
return value;
|
||||
}
|
||||
static oop oop_load_in_heap(T* addr);
|
||||
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
|
||||
|
||||
template <typename T>
|
||||
static void oop_store_in_heap(T* addr, oop value) {
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive) {
|
||||
ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
|
||||
}
|
||||
Raw::oop_store(addr, value);
|
||||
}
|
||||
|
||||
static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
|
||||
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
|
||||
value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
|
||||
|
||||
oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
|
||||
}
|
||||
static void oop_store_in_heap(T* addr, oop value);
|
||||
static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value);
|
||||
|
||||
template <typename T>
|
||||
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
|
||||
|
||||
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
|
||||
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
|
||||
new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
|
||||
return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
|
||||
}
|
||||
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
|
||||
|
||||
template <typename T>
|
||||
static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
|
||||
|
||||
static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
|
||||
base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
|
||||
new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
|
||||
return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
|
||||
}
|
||||
static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
|
||||
|
||||
template <typename T>
|
||||
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
@ -268,19 +176,13 @@ public:
|
||||
|
||||
// Needed for loads on non-heap weak references
|
||||
template <typename T>
|
||||
static oop oop_load_not_in_heap(T* addr) {
|
||||
oop value = Raw::oop_load_not_in_heap(addr);
|
||||
keep_alive_if_weak(decorators, value);
|
||||
return value;
|
||||
}
|
||||
static oop oop_load_not_in_heap(T* addr);
|
||||
|
||||
static oop resolve(oop obj) {
|
||||
return ShenandoahBarrierSet::barrier_set()->write_barrier(obj);
|
||||
}
|
||||
template <typename T>
|
||||
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
|
||||
|
||||
static bool equals(oop o1, oop o2) {
|
||||
return ShenandoahBarrierSet::barrier_set()->obj_equals(o1, o2);
|
||||
}
|
||||
template <typename T>
|
||||
static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
|
||||
|
||||
};
|
||||
|
||||
|
@ -52,7 +52,49 @@ inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
|
||||
oop value = Raw::oop_load_in_heap(addr);
|
||||
value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
|
||||
keep_alive_if_weak(decorators, value);
|
||||
return value;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
|
||||
oop value = Raw::oop_load_in_heap_at(base, offset);
|
||||
value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
|
||||
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
|
||||
return value;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
|
||||
oop value = Raw::oop_load_not_in_heap(addr);
|
||||
value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
|
||||
keep_alive_if_weak(decorators, value);
|
||||
return value;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
|
||||
ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive) {
|
||||
ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
|
||||
}
|
||||
Raw::oop_store_in_heap(addr, value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
|
||||
oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
|
||||
oop res;
|
||||
oop expected = compare_value;
|
||||
do {
|
||||
@ -60,42 +102,79 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato
|
||||
res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
|
||||
expected = res;
|
||||
} while ((! oopDesc::equals_raw(compare_value, expected)) && oopDesc::equals_raw(resolve_forwarded(compare_value), resolve_forwarded(expected)));
|
||||
if (oopDesc::equals_raw(expected, compare_value)) {
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(compare_value) &&
|
||||
ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
|
||||
ShenandoahBarrierSet::barrier_set()->enqueue(compare_value);
|
||||
}
|
||||
if (res != NULL) {
|
||||
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
|
||||
} else {
|
||||
return res;
|
||||
}
|
||||
return res;
|
||||
}
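The retry loop above guards against a false CAS failure that only exists without a to-space invariant: the slot may hold a from-space copy while compare_value is the to-space copy of the same logical object (or the other way around). A hedged standalone sketch of just that retry idea, with illustrative types instead of oops:

#include <atomic>

// Illustrative only: fwdptr points at the other physical copy of the same
// logical object, or at the object itself if it was never copied.
struct ObjModel { ObjModel* fwdptr; };

static ObjModel* resolve(ObjModel* o) { return o == nullptr ? nullptr : o->fwdptr; }

static ObjModel* cas_obj_sketch(std::atomic<ObjModel*>& slot,
                                ObjModel* new_value, ObjModel* compare_value) {
  ObjModel* expected = compare_value;
  while (true) {
    ObjModel* cmp = expected;                       // this round's compare value
    ObjModel* witness = cmp;
    slot.compare_exchange_strong(witness, new_value);
    if (witness == cmp) {
      return witness;                               // raw success
    }
    // Raw failure. If the slot merely held a different copy of the same logical
    // object, retry with that copy as the new compare value; otherwise give up.
    if (resolve(witness) != resolve(cmp)) {
      return witness;                               // genuine mismatch
    }
    expected = witness;
  }
}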
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value) {
|
||||
ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
|
||||
oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
|
||||
oopDesc::equals_raw(result, compare_value) &&
|
||||
ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
|
||||
ShenandoahBarrierSet::barrier_set()->enqueue(result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
|
||||
oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, addr, compare_value);
|
||||
keep_alive_if_weak(decorators, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
|
||||
oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
|
||||
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
|
||||
oop previous = Raw::oop_atomic_xchg(new_value, addr);
|
||||
if (previous != NULL) {
|
||||
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
|
||||
} else {
|
||||
return previous;
|
||||
}
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(oop new_value, T* addr) {
|
||||
ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
|
||||
oop result = oop_atomic_xchg_not_in_heap(new_value, addr);
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
|
||||
ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
|
||||
ShenandoahBarrierSet::barrier_set()->enqueue(result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
|
||||
oop previous = Raw::oop_atomic_xchg(new_value, addr);
|
||||
if (ShenandoahSATBBarrier) {
|
||||
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
|
||||
if (keep_alive && !CompressedOops::is_null(previous) &&
|
||||
ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
|
||||
ShenandoahBarrierSet::barrier_set()->enqueue(previous);
|
||||
}
|
||||
}
|
||||
return previous;
|
||||
oop result = oop_atomic_xchg_in_heap_impl(new_value, addr);
|
||||
keep_alive_if_weak(addr, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length) {
|
||||
if (!CompressedOops::is_null(src_obj)) {
|
||||
src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
|
||||
}
|
||||
if (!CompressedOops::is_null(dst_obj)) {
|
||||
dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
|
||||
}
|
||||
Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
|
||||
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
|
||||
oop result = oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
|
||||
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
@ -248,8 +327,6 @@ bool ShenandoahBarrierSet::arraycopy_element(T* cur_src, T* cur_dst, Klass* boun
|
||||
// Clone barrier support
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
|
||||
src = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src));
|
||||
dst = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst));
|
||||
Raw::clone(src, dst, size);
|
||||
ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) dst, size));
|
||||
}
|
||||
@ -260,13 +337,6 @@ bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length) {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!CompressedOops::is_null(src_obj)) {
|
||||
src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
|
||||
}
|
||||
if (!CompressedOops::is_null(dst_obj)) {
|
||||
dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
|
||||
}
|
||||
|
||||
bool satb = ShenandoahSATBBarrier && heap->is_concurrent_mark_in_progress();
|
||||
bool checkcast = HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value;
|
||||
bool disjoint = HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value;
|
||||
|
@ -119,39 +119,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class ShenandoahNMethodOopInitializer : public OopClosure {
|
||||
private:
|
||||
ShenandoahHeap* const _heap;
|
||||
|
||||
public:
|
||||
ShenandoahNMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {};
|
||||
|
||||
private:
|
||||
template <class T>
|
||||
inline void do_oop_work(T* p) {
|
||||
T o = RawAccess<>::oop_load(p);
|
||||
if (! CompressedOops::is_null(o)) {
|
||||
oop obj1 = CompressedOops::decode_not_null(o);
|
||||
oop obj2 = ShenandoahBarrierSet::barrier_set()->write_barrier(obj1);
|
||||
if (! oopDesc::equals_raw(obj1, obj2)) {
|
||||
shenandoah_assert_not_in_cset(NULL, obj2);
|
||||
RawAccess<IS_NOT_NULL>::oop_store(p, obj2);
|
||||
if (_heap->is_concurrent_traversal_in_progress()) {
|
||||
ShenandoahBarrierSet::barrier_set()->enqueue(obj2);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
void do_oop(oop* o) {
|
||||
do_oop_work(o);
|
||||
}
|
||||
void do_oop(narrowOop* o) {
|
||||
do_oop_work(o);
|
||||
}
|
||||
};
|
||||
|
||||
ShenandoahCodeRoots::PaddedLock ShenandoahCodeRoots::_recorded_nms_lock;
|
||||
GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms;
|
||||
|
||||
@ -163,21 +130,13 @@ void ShenandoahCodeRoots::initialize() {
|
||||
void ShenandoahCodeRoots::add_nmethod(nmethod* nm) {
|
||||
switch (ShenandoahCodeRootsStyle) {
|
||||
case 0:
|
||||
case 1: {
|
||||
ShenandoahNMethodOopInitializer init;
|
||||
nm->oops_do(&init);
|
||||
nm->fix_oop_relocations();
|
||||
case 1:
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
ShenandoahNMethodOopDetector detector;
|
||||
nm->oops_do(&detector);
|
||||
|
||||
if (detector.has_oops()) {
|
||||
ShenandoahNMethodOopInitializer init;
|
||||
nm->oops_do(&init);
|
||||
nm->fix_oop_relocations();
|
||||
|
||||
ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops());
|
||||
nmr->assert_alive_and_correct();
|
||||
|
||||
|
@ -31,8 +31,8 @@
|
||||
* Provides safe handling of out-of-memory situations during evacuation.
|
||||
*
|
||||
* When a Java thread encounters out-of-memory while evacuating an object in a
|
||||
* write-barrier (i.e. it cannot copy the object to to-space), it does not necessarily
|
||||
* follow we can return immediately from the WB (and store to from-space).
|
||||
* load-reference-barrier (i.e. it cannot copy the object to to-space), it does not
* necessarily follow that we can return immediately from the LRB (and store to from-space).
*
* In the very basic case, on such failure we may wait until the evacuation is over,
* and then resolve the forwarded copy, and do the store there. This is possible

|
||||
@ -64,17 +64,17 @@
|
||||
* - failure:
* - if offending value is a valid counter, then try again
* - if offending value is OOM-during-evac special value: loop until
* counter drops to 0, then exit with read-barrier
* counter drops to 0, then exit by resolving the ptr
*
* Upon exit, the exiting thread will decrease the counter using atomic dec.
*
* Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac
* special value into the counter. Depending on result:
* - success: busy-loop until counter drops to zero, then exit with RB
* - success: busy-loop until counter drops to zero, then exit with resolve
* - failure:
* - offender is valid counter update: try again
* - offender is OOM-during-evac: busy loop until counter drops to
* zero, then exit with RB
* zero, then exit with resolve
*/
|
||||
class ShenandoahEvacOOMHandler {
|
||||
private:
|
||||
@ -94,7 +94,7 @@ public:
|
||||
*
|
||||
* When this returns true, it is safe to continue with normal evacuation.
|
||||
* When this method returns false, evacuation must not be entered, and caller
|
||||
* may safely continue with a read-barrier (if Java thread).
|
||||
* may safely continue with a simple resolve (if Java thread).
|
||||
*/
|
||||
void enter_evacuation();
|
||||
|
||||
@ -106,7 +106,7 @@ public:
|
||||
/**
|
||||
* Signal out-of-memory during evacuation. It will prevent any other threads
|
||||
* from entering the evacuation path, then wait until all threads have left the
|
||||
* evacuation path, and then return. It is then safe to continue with a read-barrier.
|
||||
* evacuation path, and then return. It is then safe to continue with a simple resolve.
|
||||
*/
|
||||
void handle_out_of_memory_during_evacuation();
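In the sources this protocol is wrapped behind enter_evacuation()/leave_evacuation() and a scoped helper (ShenandoahEvacOOMScope). A hedged standalone sketch of the counter-plus-OOM-bit idea the comment describes; the bit value, the busy-wait, and the self-decrement in the signalling path are simplifications, not the real implementation:

#include <atomic>
#include <cstdint>

// Toy model: low bits count threads inside evacuation, a high "OOM" bit blocks
// new entries. The real handler also clears the bit when evacuation ends.
class EvacOOMModel {
  static const uint32_t OOM_BIT = 0x80000000u;
  std::atomic<uint32_t> _state{0};

public:
  // Returns true if evacuation may proceed; false means "resolve only".
  bool enter() {
    while (true) {
      uint32_t cur = _state.load();
      if (cur & OOM_BIT) {
        while (_state.load() != OOM_BIT) { /* busy-wait for evacuators to drain */ }
        return false;                      // safe to continue with a simple resolve
      }
      if (_state.compare_exchange_weak(cur, cur + 1)) {
        return true;                       // counted in; evacuation allowed
      }
    }
  }

  void leave() { _state.fetch_sub(1); }

  // Called by a thread that hit OOM while it was itself inside evacuation.
  void signal_oom_and_wait() {
    uint32_t cur = _state.load();
    while (!_state.compare_exchange_weak(cur, cur | OOM_BIT)) { }  // publish the OOM bit
    leave();                                                        // drop our own count
    while ((_state.load() & ~OOM_BIT) != 0) { /* wait for remaining evacuators */ }
    // From here on it is only safe to resolve, not to evacuate.
  }
};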
|
||||
|
||||
|
@ -389,10 +389,6 @@ void ShenandoahHeap::initialize_heuristics() {
|
||||
err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
|
||||
_heuristics->name()));
|
||||
}
|
||||
|
||||
if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
|
||||
vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
|
||||
}
|
||||
log_info(gc, init)("Shenandoah heuristics: %s",
|
||||
_heuristics->name());
|
||||
} else {
|
||||
@ -791,7 +787,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
|
||||
assert(req.is_gc_alloc(), "Can only accept GC allocs here");
|
||||
result = allocate_memory_under_lock(req, in_new_region);
|
||||
// Do not call handle_alloc_failure() here, because we cannot block.
|
||||
// The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
|
||||
// The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
|
||||
}
|
||||
|
||||
if (in_new_region) {
|
||||
@ -1105,7 +1101,6 @@ public:
|
||||
ShenandoahParallelWorkerSession worker_session(worker_id);
|
||||
ShenandoahEvacOOMScope oom_evac_scope;
|
||||
ShenandoahEvacuateUpdateRootsClosure cl;
|
||||
|
||||
MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
|
||||
_rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
|
||||
}
|
||||
@ -2062,14 +2057,12 @@ void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
|
||||
}
|
||||
|
||||
oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
|
||||
o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
|
||||
ShenandoahHeapLocker locker(lock());
|
||||
heap_region_containing(o)->make_pinned();
|
||||
return o;
|
||||
}
|
||||
|
||||
void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
|
||||
o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
|
||||
ShenandoahHeapLocker locker(lock());
|
||||
heap_region_containing(o)->make_unpinned();
|
||||
}
|
||||
|
@ -270,16 +270,16 @@ public:
|
||||
//
|
||||
public:
|
||||
enum GCStateBitPos {
|
||||
// Heap has forwarded objects: need RB, ACMP, CAS barriers.
|
||||
// Heap has forwarded objects: needs LRB barriers.
|
||||
HAS_FORWARDED_BITPOS = 0,
|
||||
|
||||
// Heap is under marking: needs SATB barriers.
|
||||
MARKING_BITPOS = 1,
|
||||
|
||||
// Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
|
||||
// Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
|
||||
EVACUATION_BITPOS = 2,
|
||||
|
||||
// Heap is under updating: needs SVRB/SVWB barriers.
|
||||
// Heap is under updating: needs no additional barriers.
|
||||
UPDATEREFS_BITPOS = 3,
|
||||
|
||||
// Heap is under traversal collection
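These bit positions are packed into the gc_state value that the barrier fast paths test. A hedged C++ sketch of that check; the mask constants mirror the enum above, the helper functions are illustrative rather than HotSpot code:

#include <cstdint>

// Masks derived from the bit positions above (1 << BITPOS).
static const uint8_t HAS_FORWARDED = 1u << 0;
static const uint8_t MARKING       = 1u << 1;
static const uint8_t EVACUATION    = 1u << 2;
static const uint8_t UPDATEREFS    = 1u << 3;

// Illustrative fast paths: the load-reference barrier is a no-op unless the
// heap currently has forwarded objects; the SATB barrier keys off MARKING.
static bool needs_load_reference_barrier(uint8_t gc_state) {
  return (gc_state & HAS_FORWARDED) != 0;
}

static bool needs_satb_barrier(uint8_t gc_state) {
  return (gc_state & MARKING) != 0;
}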
|
||||
|
@ -129,7 +129,7 @@ void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
|
||||
|
||||
// Once marking is done, which may have fixed up forwarded objects, we can drop it.
|
||||
// Coming out of Full GC, we would not have any forwarded objects.
|
||||
// This also prevents read barrier from kicking in while adjusting pointers in phase3.
|
||||
// This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
|
||||
heap->set_has_forwarded_objects(false);
|
||||
|
||||
heap->set_full_gc_move_in_progress(true);
|
||||
|
@ -34,7 +34,7 @@
|
||||
|
||||
enum UpdateRefsMode {
|
||||
NONE, // No reference updating
|
||||
RESOLVE, // Only a read-barrier (no reference updating)
|
||||
RESOLVE, // Only a resolve (no reference updating)
|
||||
SIMPLE, // Reference updating using simple store
|
||||
CONCURRENT // Reference updating using CAS
|
||||
};
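A hedged sketch of what each mode means for a single reference slot (stand-in types only; the real closures operate on oops via RawAccess and Atomic):

#include <atomic>

// Illustrative stand-ins, not HotSpot types.
struct ObjModel { ObjModel* fwdptr; };

enum class Mode { NONE, RESOLVE, SIMPLE, CONCURRENT };

static ObjModel* visit_slot(std::atomic<ObjModel*>& slot, Mode mode) {
  ObjModel* obj = slot.load();
  if (obj == nullptr || mode == Mode::NONE) {
    return obj;                                    // NONE: neither resolve nor update
  }
  ObjModel* fwd = obj->fwdptr;                     // resolve to the to-space copy
  if (fwd != obj) {
    if (mode == Mode::SIMPLE) {
      slot.store(fwd);                             // single writer: plain store suffices
    } else if (mode == Mode::CONCURRENT) {
      slot.compare_exchange_strong(obj, fwd);      // racy slot: update via CAS
    }
  }
  return fwd;                                      // RESOLVE: return forwardee, leave slot alone
}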
|
||||
|
@ -242,13 +242,21 @@ ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_wo
|
||||
_evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
|
||||
_srs(n_workers),
|
||||
_phase(phase),
|
||||
_coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
|
||||
_coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()),
|
||||
_par_state_string(StringTable::weak_storage())
|
||||
|
||||
{
|
||||
heap->phase_timings()->record_workers_start(_phase);
|
||||
if (ShenandoahStringDedup::is_enabled()) {
|
||||
StringDedup::gc_prologue(false);
|
||||
}
|
||||
}
|
||||
|
||||
ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
|
||||
delete _evacuation_tasks;
|
||||
if (ShenandoahStringDedup::is_enabled()) {
|
||||
StringDedup::gc_epilogue();
|
||||
}
|
||||
ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
|
||||
}
|
||||
|
||||
@ -270,11 +278,38 @@ void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
|
||||
_coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_jvmti_oops_do)) {
|
||||
if (ShenandoahStringDedup::is_enabled()) {
|
||||
ShenandoahForwardedIsAliveClosure is_alive;
|
||||
ShenandoahStringDedup::parallel_oops_do(&is_alive, oops, worker_id);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_Universe_oops_do)) {
|
||||
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
|
||||
Universe::oops_do(oops);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_Management_oops_do)) {
|
||||
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
|
||||
Management::oops_do(oops);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_jvmti_oops_do)) {
|
||||
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
|
||||
JvmtiExport::oops_do(oops);
|
||||
ShenandoahForwardedIsAliveClosure is_alive;
|
||||
JvmtiExport::weak_oops_do(&is_alive, oops);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_SystemDictionary_oops_do)) {
|
||||
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
|
||||
SystemDictionary::oops_do(oops);
|
||||
}
|
||||
|
||||
if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_ObjectSynchronizer_oops_do)) {
|
||||
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
|
||||
ObjectSynchronizer::oops_do(oops);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
uint ShenandoahRootEvacuator::n_workers() const {
|
||||
|
@ -58,7 +58,7 @@ class ShenandoahRootProcessor : public StackObj {
|
||||
StrongRootsScope _srs;
|
||||
OopStorage::ParState<false, false> _par_state_string;
|
||||
ShenandoahPhaseTimings::Phase _phase;
|
||||
ParallelCLDRootIterator _cld_iterator;
|
||||
ParallelCLDRootIterator _cld_iterator;
|
||||
ShenandoahAllCodeRootsIterator _coderoots_all_iterator;
|
||||
CodeBlobClosure* _threads_nmethods_cl;
|
||||
WeakProcessorPhaseTimes _weak_processor_timings;
|
||||
@ -120,11 +120,16 @@ class ShenandoahRootEvacuator : public StackObj {
|
||||
StrongRootsScope _srs;
|
||||
ShenandoahPhaseTimings::Phase _phase;
|
||||
ShenandoahCsetCodeRootsIterator _coderoots_cset_iterator;
|
||||
OopStorage::ParState<false, false> _par_state_string;
|
||||
|
||||
enum Shenandoah_evacuate_roots_tasks {
|
||||
SHENANDOAH_EVAC_jvmti_oops_do,
|
||||
// Leave this one last.
|
||||
SHENANDOAH_EVAC_NumElements
|
||||
SHENANDOAH_EVAC_Universe_oops_do,
|
||||
SHENANDOAH_EVAC_ObjectSynchronizer_oops_do,
|
||||
SHENANDOAH_EVAC_Management_oops_do,
|
||||
SHENANDOAH_EVAC_SystemDictionary_oops_do,
|
||||
SHENANDOAH_EVAC_jvmti_oops_do,
|
||||
// Leave this one last.
|
||||
SHENANDOAH_EVAC_NumElements
|
||||
};
|
||||
public:
|
||||
ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers,
|
||||
|
@ -55,8 +55,8 @@ JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaT
|
||||
ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(orig);
|
||||
JRT_END
|
||||
|
||||
JRT_LEAF(oopDesc*, ShenandoahRuntime::write_barrier_JRT(oopDesc* src))
|
||||
oop result = ShenandoahBarrierSet::barrier_set()->write_barrier_mutator(src);
|
||||
JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_JRT(oopDesc* src))
|
||||
oop result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src);
|
||||
return (oopDesc*) result;
|
||||
JRT_END
|
||||
|
||||
|
@ -37,7 +37,7 @@ public:
|
||||
static void write_ref_array_post_entry(HeapWord* dst, size_t length);
|
||||
static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
|
||||
|
||||
static oopDesc* write_barrier_JRT(oopDesc* src);
|
||||
static oopDesc* load_reference_barrier_JRT(oopDesc* src);
|
||||
|
||||
static void shenandoah_clone_barrier(oopDesc* obj);
|
||||
};
|
||||
|
@ -244,7 +244,7 @@
|
||||
"Time is in microseconds.") \
|
||||
\
|
||||
experimental(uintx, ShenandoahEvacAssist, 10, \
|
||||
"How many objects to evacuate on WB assist path. " \
|
||||
"How many objects to evacuate on LRB assist path. " \
|
||||
"Use zero to disable.") \
|
||||
\
|
||||
experimental(bool, ShenandoahPacing, true, \
|
||||
@ -352,27 +352,18 @@
|
||||
diagnostic(bool, ShenandoahKeepAliveBarrier, true, \
|
||||
"Turn on/off keep alive barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahWriteBarrier, true, \
|
||||
"Turn on/off write barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahReadBarrier, true, \
|
||||
"Turn on/off read barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false, \
|
||||
"Turn on/off enqueuing of oops for storeval barriers") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahStoreValReadBarrier, true, \
|
||||
"Turn on/off store val read barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahCASBarrier, true, \
|
||||
"Turn on/off CAS barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahAcmpBarrier, true, \
|
||||
"Turn on/off acmp barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahCloneBarrier, true, \
|
||||
"Turn on/off clone barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahLoadRefBarrier, true, \
|
||||
"Turn on/off load-reference barriers in Shenandoah") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahStoreCheck, false, \
|
||||
"Emit additional code that checks objects are written to only" \
|
||||
" in to-space") \
|
||||
@ -401,20 +392,13 @@
|
||||
"Turn it off for maximum compatibility with reflection or JNI " \
|
||||
"code that manipulates final fields.") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahDecreaseRegisterPressure, false, \
|
||||
"Try to reuse after-barrier values to reduce register pressure") \
|
||||
\
|
||||
experimental(bool, ShenandoahCommonGCStateLoads, false, \
|
||||
"Enable commonming for GC state loads in generated code.") \
|
||||
\
|
||||
develop(bool, ShenandoahVerifyOptoBarriers, false, \
|
||||
"Verify no missing barriers in C2") \
|
||||
\
|
||||
experimental(bool, ShenandoahDontIncreaseWBFreq, true, \
|
||||
"Common 2 WriteBarriers or WriteBarrier and a ReadBarrier only " \
|
||||
"if the resulting WriteBarrier isn't executed more frequently") \
|
||||
\
|
||||
experimental(bool, ShenandoahLoopOptsAfterExpansion, true, \
|
||||
"Attempt more loop opts after write barrier expansion") \
|
||||
"Attempt more loop opts after barrier expansion") \
|
||||
|
||||
#endif // SHARE_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
|
||||
|
@ -281,9 +281,7 @@ shmacro(ShenandoahCompareAndSwapP)
|
||||
shmacro(ShenandoahWeakCompareAndSwapN)
|
||||
shmacro(ShenandoahWeakCompareAndSwapP)
|
||||
shmacro(ShenandoahEnqueueBarrier)
|
||||
shmacro(ShenandoahReadBarrier)
|
||||
shmacro(ShenandoahWriteBarrier)
|
||||
shmacro(ShenandoahWBMemProj)
|
||||
shmacro(ShenandoahLoadReferenceBarrier)
|
||||
macro(SCMemProj)
|
||||
macro(SqrtD)
|
||||
macro(SqrtF)
|
||||
|
@ -3070,7 +3070,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
|
||||
Node *m = wq.at(next);
|
||||
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
|
||||
Node* use = m->fast_out(i);
|
||||
if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
|
||||
if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
|
||||
use->ensure_control_or_add_prec(n->in(0));
|
||||
} else {
|
||||
switch(use->Opcode()) {
|
||||
|
@ -178,7 +178,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
|
||||
case Op_LoadRange:
|
||||
case Op_LoadD_unaligned:
|
||||
case Op_LoadL_unaligned:
|
||||
case Op_ShenandoahReadBarrier:
|
||||
assert(mach->in(2) == val, "should be address");
|
||||
break;
|
||||
case Op_StoreB:
|
||||
|
@ -4485,7 +4485,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
|
||||
for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
|
||||
Node* n = mms.memory();
|
||||
if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
|
||||
assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
|
||||
assert(n->is_Store(), "what else?");
|
||||
no_interfering_store = false;
|
||||
break;
|
||||
}
|
||||
@ -4494,7 +4494,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
|
||||
for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
|
||||
Node* n = mms.memory();
|
||||
if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
|
||||
assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
|
||||
assert(n->is_Store(), "what else?");
|
||||
no_interfering_store = false;
|
||||
break;
|
||||
}
|
||||
|
@ -536,9 +536,6 @@ class Invariance : public StackObj {
|
||||
if (_lpt->is_invariant(n)) { // known invariant
|
||||
_invariant.set(n->_idx);
|
||||
} else if (!n->is_CFG()) {
|
||||
if (n->Opcode() == Op_ShenandoahWriteBarrier) {
|
||||
return;
|
||||
}
|
||||
Node *n_ctrl = _phase->ctrl_or_self(n);
|
||||
Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
|
||||
if (_phase->is_dominator(n_ctrl, u_ctrl)) {
|
||||
|
@ -3971,7 +3971,7 @@ Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
|
||||
}
|
||||
while(worklist.size() != 0 && LCA != early) {
|
||||
Node* s = worklist.pop();
|
||||
if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
|
||||
if (s->is_Load() || s->Opcode() == Op_SafePoint ||
|
||||
(s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
|
||||
continue;
|
||||
} else if (s->is_MergeMem()) {
|
||||
|
@ -38,8 +38,6 @@ class IdealLoopTree;
|
||||
class LoopNode;
|
||||
class Node;
|
||||
class OuterStripMinedLoopEndNode;
|
||||
class ShenandoahBarrierNode;
|
||||
class ShenandoahWriteBarrierNode;
|
||||
class PathFrequency;
|
||||
class PhaseIdealLoop;
|
||||
class CountedLoopReserveKit;
|
||||
@ -638,8 +636,7 @@ class PhaseIdealLoop : public PhaseTransform {
|
||||
friend class IdealLoopTree;
|
||||
friend class SuperWord;
|
||||
friend class CountedLoopReserveKit;
|
||||
friend class ShenandoahBarrierNode;
|
||||
friend class ShenandoahWriteBarrierNode;
|
||||
friend class ShenandoahBarrierC2Support;
|
||||
|
||||
// Pre-computed def-use info
|
||||
PhaseIterGVN &_igvn;
|
||||
|
@ -1082,11 +1082,6 @@ static bool merge_point_safe(Node* region) {
|
||||
Node* m = n->fast_out(j);
|
||||
if (m->is_FastLock())
|
||||
return false;
|
||||
#if INCLUDE_SHENANDOAHGC
|
||||
if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
#ifdef _LP64
|
||||
if (m->Opcode() == Op_ConvI2L)
|
||||
return false;
|
||||
@ -3210,7 +3205,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
|
||||
|
||||
// if not pinned and not a load (which maybe anti-dependent on a store)
|
||||
// and not a CMove (Matcher expects only bool->cmove).
|
||||
if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
|
||||
if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
|
||||
cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
|
||||
sink_list.push(n);
|
||||
peel >>= n->_idx; // delete n from peel set.
|
||||
|
@ -142,7 +142,6 @@ class RegionNode;
|
||||
class RootNode;
|
||||
class SafePointNode;
|
||||
class SafePointScalarObjectNode;
|
||||
class ShenandoahBarrierNode;
|
||||
class StartNode;
|
||||
class State;
|
||||
class StoreNode;
|
||||
@ -676,7 +675,6 @@ public:
|
||||
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
|
||||
DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
|
||||
DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
|
||||
DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7)
|
||||
|
||||
DEFINE_CLASS_ID(Proj, Node, 3)
|
||||
DEFINE_CLASS_ID(CatchProj, Proj, 0)
|
||||
@ -875,7 +873,6 @@ public:
|
||||
DEFINE_CLASS_QUERY(Root)
|
||||
DEFINE_CLASS_QUERY(SafePoint)
|
||||
DEFINE_CLASS_QUERY(SafePointScalarObject)
|
||||
DEFINE_CLASS_QUERY(ShenandoahBarrier)
|
||||
DEFINE_CLASS_QUERY(Start)
|
||||
DEFINE_CLASS_QUERY(Store)
|
||||
DEFINE_CLASS_QUERY(Sub)
|
||||
|
@ -43,13 +43,9 @@ public class TestSelectiveBarrierFlags {
|
||||
public static void main(String[] args) throws Exception {
|
||||
String[][] opts = {
|
||||
new String[] { "ShenandoahKeepAliveBarrier" },
|
||||
new String[] { "ShenandoahWriteBarrier" },
|
||||
new String[] { "ShenandoahReadBarrier" },
|
||||
// StoreValRead+SATB are actually compatible, but we need to protect against
|
||||
// StorveValEnqueue+SATB. TODO: Make it better.
|
||||
new String[] { "ShenandoahSATBBarrier", "ShenandoahStoreValReadBarrier", "ShenandoahStoreValEnqueueBarrier" },
|
||||
new String[] { "ShenandoahLoadRefBarrier" },
|
||||
new String[] { "ShenandoahSATBBarrier", "ShenandoahStoreValEnqueueBarrier" },
|
||||
new String[] { "ShenandoahCASBarrier" },
|
||||
new String[] { "ShenandoahAcmpBarrier" },
|
||||
new String[] { "ShenandoahCloneBarrier" },
|
||||
};
|
||||
|
||||
|
@ -38,21 +38,16 @@ public class TestWrongBarrierDisable {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
String[] concurrent = {
|
||||
"ShenandoahReadBarrier",
|
||||
"ShenandoahWriteBarrier",
|
||||
"ShenandoahLoadRefBarrier",
|
||||
"ShenandoahCASBarrier",
|
||||
"ShenandoahAcmpBarrier",
|
||||
"ShenandoahCloneBarrier",
|
||||
"ShenandoahSATBBarrier",
|
||||
"ShenandoahKeepAliveBarrier",
|
||||
"ShenandoahStoreValReadBarrier",
|
||||
};
|
||||
|
||||
String[] traversal = {
|
||||
"ShenandoahReadBarrier",
|
||||
"ShenandoahWriteBarrier",
|
||||
"ShenandoahLoadRefBarrier",
|
||||
"ShenandoahCASBarrier",
|
||||
"ShenandoahAcmpBarrier",
|
||||
"ShenandoahCloneBarrier",
|
||||
};
|
||||
|
||||
|