8299312: Clean up BarrierSetNMethod

Reviewed-by: mdoerr, fyang
Erik Österlund 2023-01-09 13:31:26 +00:00
parent 66db0bb6a1
commit 4ba8122197
29 changed files with 93 additions and 165 deletions


@@ -1739,7 +1739,7 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("\n\t");
st->print("ldr rscratch1, [guard]\n\t");
st->print("dmb ishld\n\t");
st->print("ldr rscratch2, [rthread, #thread_disarmed_offset]\n\t");
st->print("ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
st->print("cmp rscratch1, rscratch2\n\t");
st->print("b.eq skip");
st->print("\n\t");
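Read as pseudocode, the printed sequence above is the fast-path guard check. An illustrative C++ sketch (names invented for the illustration; in HotSpot the guard is an int patched into the nmethod and the disarmed value is a field of Thread, not a thread_local):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> nmethod_guard;               // stands in for the patched [guard] slot
thread_local int32_t thread_disarmed_guard_value; // stands in for the Thread field

bool entry_barrier_fast_path() {
  // "ldr rscratch1, [guard]" followed by "dmb ishld" ~ a load-acquire of the guard
  int32_t guard = nmethod_guard.load(std::memory_order_acquire);
  // "ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]"; "cmp"; "b.eq skip"
  return guard == thread_disarmed_guard_value;
}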


@@ -217,7 +217,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
// instruction patching is handled with isb fences on the way back
// from the safepoint to Java. So here we can do a plain conditional
// branch with no fencing.
Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ ldrw(rscratch2, thread_disarmed_addr);
__ cmp(rscratch1, rscratch2);
} else if (patching_type == NMethodPatchingType::conc_instruction_and_data_patch) {
@@ -238,7 +238,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
// Combine the guard value (low order) with the epoch value (high order).
__ orr(rscratch1, rscratch1, rscratch2, Assembler::LSL, 32);
// Compare the global values with the thread-local values.
Address thread_disarmed_and_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_and_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ ldr(rscratch2, thread_disarmed_and_epoch_addr);
__ cmp(rscratch1, rscratch2);
} else {
@@ -246,7 +246,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
// Subsequent loads of oops must occur after load of guard value.
// BarrierSetNMethod::disarm sets guard with release semantics.
__ membar(__ LoadLoad);
Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ ldrw(rscratch2, thread_disarmed_addr);
__ cmpw(rscratch1, rscratch2);
}
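The conc_instruction_and_data_patch case above folds two 32-bit values into one 64-bit compare: guard in the low half, patching epoch in the high half. A minimal sketch of that packing (illustrative, not HotSpot code):

#include <cstdint>

// "orr rscratch1, rscratch1, rscratch2, Assembler::LSL, 32"
uint64_t combine_guard_and_epoch(uint32_t guard, uint32_t epoch) {
  return (uint64_t)guard | ((uint64_t)epoch << 32);
}

// "ldr rscratch2, [thread_disarmed_and_epoch_addr]"; "cmp rscratch1, rscratch2"
bool disarmed_and_fenced(uint64_t thread_word, uint32_t guard, uint32_t epoch) {
  return combine_guard_and_epoch(guard, epoch) == thread_word;
}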


@@ -163,32 +163,12 @@ static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
return barrier;
}
void BarrierSetNMethod::disarm(nmethod* nm) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
// The patching epoch is incremented before the nmethod is disarmed. Disarming
// is performed with a release store. In the nmethod entry barrier, the values
// are read in the opposite order, such that the load of the nmethod guard
// acquires the patching epoch. This way, the guard is guaranteed to block
// entries to the nmethod, until it has safely published the requirement for
// further fencing by mutators, before they are allowed to enter.
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->increment_patching_epoch();
// Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
// Symmetric "LDR; DMB ISHLD" is in the nmethod barrier.
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(nm, disarmed_value());
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
if (!supports_entry_barrier(nm)) {
return;
}
if (arm_value == disarmed_value()) {
if (value == disarmed_guard_value()) {
// The patching epoch is incremented before the nmethod is disarmed. Disarming
// is performed with a release store. In the nmethod entry barrier, the values
// are read in the opposite order, such that the load of the nmethod guard
@@ -200,14 +180,14 @@ void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(nm, arm_value);
barrier->set_value(nm, value);
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
return disarmed_guard_value();
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
return barrier->get_value(nm) != disarmed_value();
return barrier->get_value(nm);
}
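The net effect of this hunk: disarm() and arm() collapse into a single set_guard_value(), and the epoch-increment protocol runs only when the store actually disarms. Condensed from the fragments above (a sketch of the resulting shape, not a verbatim copy of the file):

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }
  if (value == disarmed_guard_value()) {
    // Only the disarming store needs the epoch bump before its release store.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }
  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  barrier->set_value(nm, value);
}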


@@ -5334,7 +5334,7 @@ class StubGenerator: public StubCodeGenerator {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
// We can get here despite the nmethod being good, if we have not
// yet applied our cross modification fence (or data fence).
Address thread_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()) + 4);
Address thread_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()) + 4);
__ lea(rscratch2, ExternalAddress(bs_asm->patching_epoch_addr()));
__ ldrw(rscratch2, rscratch2);
__ strw(rscratch2, thread_epoch_addr);
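The "+ 4" addresses the high (epoch) half of the thread-local guard word; the stub copies the global patching epoch into it before re-checking the guard. An illustrative little-endian sketch of the same store (hypothetical helper, not the stub code):

#include <cstdint>
#include <cstring>

void publish_patching_epoch(uint64_t* thread_guard_word, const uint32_t* global_epoch) {
  uint32_t epoch = *global_epoch;  // "ldrw rscratch2, [patching_epoch_addr]"
  // "strw rscratch2, [thread_epoch_addr]": bytes 4..7 of the 64-bit word
  std::memcpy(reinterpret_cast<char*>(thread_guard_word) + 4, &epoch, sizeof(epoch));
}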


@@ -292,7 +292,7 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
st->print("ldr t0, [guard]\n\t");
st->print("ldr t1, [Rthread, #thread_disarmed_offset]\n\t");
st->print("ldr t1, [Rthread, #thread_disarmed_guard_value_offset]\n\t");
st->print("cmp t0, t1\n\t");
st->print("beq skip\n\t");
st->print("blr #nmethod_entry_barrier_stub\n\t");


@@ -217,7 +217,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
#endif
Label skip, guard;
Address thread_disarmed_addr(Rthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_addr(Rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ block_comment("nmethod_barrier begin");
__ ldr_label(tmp0, guard);


@@ -116,7 +116,7 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}
void BarrierSetNMethod::disarm(nmethod* nm) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
@@ -124,23 +124,14 @@ void BarrierSetNMethod::disarm(nmethod* nm) {
// Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
// Symmetric "LDR; DMB ISHLD" is in the nmethod barrier.
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(disarmed_value());
barrier->set_value(value);
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return;
return disarmed_guard_value();
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(arm_value);
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
return barrier->get_value() != disarmed_value();
return barrier->get_value();
}


@@ -163,7 +163,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register t
__ load_const32(tmp, 0 /* Value is patched */); // 2 instructions
// Low order half of 64 bit value is currently used.
__ ld(R0, in_bytes(bs_nm->thread_disarmed_offset()), R16_thread);
__ ld(R0, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), R16_thread);
__ cmpw(CCR0, R0, tmp);
__ bnectrl(CCR0);


@@ -118,29 +118,20 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
// Thus, there's nothing to do here.
}
void BarrierSetNMethod::disarm(nmethod* nm) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
barrier->release_set_guard_value(disarmed_value());
barrier->release_set_guard_value(value);
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return;
return disarmed_guard_value();
}
NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
barrier->release_set_guard_value(arm_value);
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
}
NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
return barrier->get_guard_value() != disarmed_value();
return barrier->get_guard_value();
}


@@ -220,7 +220,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
// instruction patching is synchronized with global icache_flush() by
// the write hart on riscv. So here we can do a plain conditional
// branch with no fencing.
Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ lwu(t1, thread_disarmed_addr);
break;
}
@@ -245,7 +245,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
__ slli(t1, t1, 32);
__ orr(t0, t0, t1);
// Compare the global values with the thread-local values
Address thread_disarmed_and_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
Address thread_disarmed_and_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
__ ld(t1, thread_disarmed_and_epoch_addr);
break;
}


@@ -171,32 +171,12 @@ static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
return barrier;
}
void BarrierSetNMethod::disarm(nmethod* nm) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
// The patching epoch is incremented before the nmethod is disarmed. Disarming
// is performed with a release store. In the nmethod entry barrier, the values
// are read in the opposite order, such that the load of the nmethod guard
// acquires the patching epoch. This way, the guard is guaranteed to block
// entries to the nmethod, until it has safely published the requirement for
// further fencing by mutators, before they are allowed to enter.
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->increment_patching_epoch();
// Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
// Symmetric "LD; FENCE IR, IR" is in the nmethod barrier.
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(nm, disarmed_value());
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
if (!supports_entry_barrier(nm)) {
return;
}
if (arm_value == disarmed_value()) {
if (value == disarmed_guard_value()) {
// The patching epoch is incremented before the nmethod is disarmed. Disarming
// is performed with a release store. In the nmethod entry barrier, the values
// are read in the opposite order, such that the load of the nmethod guard
@@ -208,14 +188,14 @@ void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(nm, arm_value);
barrier->set_value(nm, value);
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
return disarmed_guard_value();
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
return barrier->get_value(nm) != disarmed_value();
return barrier->get_value(nm);
}


@@ -1303,7 +1303,7 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
st->print("ld t0, [guard]\n\t");
st->print("membar LoadLoad\n\t");
st->print("ld t1, [xthread, #thread_disarmed_offset]\n\t");
st->print("ld t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
st->print("beq t0, t1, skip\n\t");
st->print("jalr #nmethod_entry_barrier_stub\n\t");
st->print("j skip\n\t");


@@ -2461,7 +2461,7 @@ class StubGenerator: public StubCodeGenerator {
if (bs_asm->nmethod_patching_type() == NMethodPatchingType::conc_instruction_and_data_patch) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
Address thread_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()) + 4);
Address thread_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()) + 4);
__ la(t1, ExternalAddress(bs_asm->patching_epoch_addr()));
__ lwu(t1, t1);
__ sw(t1, thread_epoch_addr);


@@ -135,7 +135,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
__ load_const(Z_R1_scratch, (uint64_t)StubRoutines::zarch::nmethod_entry_barrier()); // 2*6 bytes
// Load value from current java object:
__ z_lg(Z_R0_scratch, in_bytes(bs_nm->thread_disarmed_offset()), Z_thread); // 6 bytes
__ z_lg(Z_R0_scratch, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), Z_thread); // 6 bytes
// Compare to current patched value:
__ z_cfi(Z_R0_scratch, /* to be patched */ -1); // 6 bytes (2 + 4 byte imm val)


@@ -101,29 +101,20 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
return;
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
NativeMethodBarrier* barrier = get_nmethod_barrier(nm);
barrier->set_guard_value(arm_value);
barrier->set_guard_value(value);
}
void BarrierSetNMethod::disarm(nmethod* nm) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return;
return disarmed_guard_value();
}
NativeMethodBarrier* barrier = get_nmethod_barrier(nm);
barrier->set_guard_value(disarmed_value());
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
}
NativeMethodBarrier* barrier = get_nmethod_barrier(nm);
return barrier->get_guard_value() != disarmed_value();
return barrier->get_guard_value();
}


@@ -279,7 +279,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
return;
}
Register thread = r15_thread;
Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_offset()));
Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
// The immediate is the last 4 bytes, so if we align the start of the cmp
// instruction to 4 bytes, we know that the second half of it is also 4
// byte aligned, which means that the immediate will not cross a cache line
@@ -310,7 +310,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, La
Register tmp = rdi;
__ push(tmp);
__ movptr(tmp, (intptr_t)bs_nm->disarmed_value_address());
__ movptr(tmp, (intptr_t)bs_nm->disarmed_guard_value_address());
Address disarmed_addr(tmp, 0);
__ align(4);
__ cmpl_imm32(disarmed_addr, 0);
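The alignment comment in the first hunk is the load-bearing part on x86: the guard is the imm32 of a cmpl against the thread-local value, and keeping the immediate inside one cache line lets set_guard_value() patch it with a single untorn store. A small sketch of that alignment argument (illustrative, assuming 64-byte lines):

#include <cstdint>

bool imm32_stays_in_one_cache_line(uintptr_t imm_addr) {
  return (imm_addr % 4 == 0) &&                   // 4-byte aligned start
         (imm_addr / 64 == (imm_addr + 3) / 64);  // last byte on the same line
}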


@@ -56,7 +56,7 @@ public:
address instruction_address() const { return addr_at(0); }
address immediate_address() const { return addr_at(imm_offset); }
jint get_immedate() const { return int_at(imm_offset); }
jint get_immediate() const { return int_at(imm_offset); }
void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
void verify() const;
};
@@ -176,29 +176,20 @@ static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
return barrier;
}
void BarrierSetNMethod::disarm(nmethod* nm) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}
NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
cmp->set_immediate(disarmed_value());
cmp->set_immediate(value);
}
void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return;
return disarmed_guard_value();
}
NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
cmp->set_immediate(arm_value);
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return false;
}
NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
return (disarmed_value() != cmp->get_immedate());
return cmp->get_immediate();
}


@@ -889,7 +889,7 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
}
if (C->stub_function() != NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
st->print("\n\t");
st->print("cmpl [r15_thread + #disarmed_offset], #disarmed_value\t");
st->print("cmpl [r15_thread + #disarmed_guard_value_offset], #disarmed_guard_value\t");
st->print("\n\t");
st->print("je fast_entry\t");
st->print("\n\t");


@@ -30,15 +30,11 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
ShouldNotReachHere();
}
void BarrierSetNMethod::arm(nmethod* nm, int value) {
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
ShouldNotReachHere();
}
void BarrierSetNMethod::disarm(nmethod* nm) {
int BarrierSetNMethod::guard_value(nmethod* nm) {
ShouldNotReachHere();
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
ShouldNotReachHere();
return false;
return -1;
}


@@ -88,7 +88,7 @@ BarrierSet::BarrierSet(BarrierSetAssembler* barrier_set_assembler,
void BarrierSet::on_thread_attach(Thread* thread) {
BarrierSetNMethod* bs_nm = barrier_set_nmethod();
if (bs_nm != nullptr) {
thread->set_nmethod_disarm_value(bs_nm->disarmed_value());
thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());
}
}


@@ -40,8 +40,8 @@
#include "runtime/threads.hpp"
#include "utilities/debug.hpp"
int BarrierSetNMethod::disarmed_value() const {
return *disarmed_value_address();
int BarrierSetNMethod::disarmed_guard_value() const {
return *disarmed_guard_value_address();
}
bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
@@ -69,6 +69,14 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
return true;
}
void BarrierSetNMethod::disarm(nmethod* nm) {
set_guard_value(nm, disarmed_guard_value());
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
return guard_value(nm) != disarmed_guard_value();
}
bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
class OopKeepAliveClosure : public OopClosure {
public:
@@ -102,24 +110,24 @@ bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
return true;
}
int* BarrierSetNMethod::disarmed_value_address() const {
int* BarrierSetNMethod::disarmed_guard_value_address() const {
return (int*) &_current_phase;
}
ByteSize BarrierSetNMethod::thread_disarmed_offset() const {
return Thread::nmethod_disarmed_offset();
ByteSize BarrierSetNMethod::thread_disarmed_guard_value_offset() const {
return Thread::nmethod_disarmed_guard_value_offset();
}
class BarrierSetNMethodArmClosure : public ThreadClosure {
private:
int _disarm_value;
int _disarmed_guard_value;
public:
BarrierSetNMethodArmClosure(int disarm_value) :
_disarm_value(disarm_value) {}
BarrierSetNMethodArmClosure(int disarmed_guard_value) :
_disarmed_guard_value(disarmed_guard_value) {}
virtual void do_thread(Thread* thread) {
thread->set_nmethod_disarm_value(_disarm_value);
thread->set_nmethod_disarmed_guard_value(_disarmed_guard_value);
}
};
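arm_all_nmethods() itself is outside this hunk, but given disarmed_guard_value_address() returning &_current_phase and the closure above, its shape is presumably: advance the phase, then republish the matching disarmed value to every thread. A hedged sketch (the wrap-around policy is an assumption, not shown in this diff):

void BarrierSetNMethod::arm_all_nmethods() {
  // A new phase value makes every previously disarmed guard stale, i.e. armed.
  ++_current_phase;
  if (_current_phase == INT_MAX) {  // assumed wrap-around, avoiding overflow
    _current_phase = 1;
  }
  BarrierSetNMethodArmClosure arm_closure(disarmed_guard_value());
  Threads::threads_do(&arm_closure);
}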


@@ -41,16 +41,18 @@ public:
bool supports_entry_barrier(nmethod* nm);
virtual bool nmethod_entry_barrier(nmethod* nm);
virtual ByteSize thread_disarmed_offset() const;
virtual int* disarmed_value_address() const;
virtual ByteSize thread_disarmed_guard_value_offset() const;
virtual int* disarmed_guard_value_address() const;
int disarmed_value() const;
int disarmed_guard_value() const;
static int nmethod_stub_entry_barrier(address* return_address_ptr);
bool nmethod_osr_entry_barrier(nmethod* nm);
bool is_armed(nmethod* nm);
void disarm(nmethod* nm);
void arm(nmethod* nm, int arm_value);
int guard_value(nmethod* nm);
void set_guard_value(nmethod* nm, int value);
void arm_all_nmethods();
};
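For contrast, the old surface exposed arm(nm, value), disarm(nm), and is_armed(nm) as three primitives; now guard_value()/set_guard_value() are the primitives and the rest are derived. Illustrative usage of the revised API:

void example(BarrierSetNMethod* bs_nm, nmethod* nm) {
  int current = bs_nm->guard_value(nm);  // read the raw guard
  bs_nm->set_guard_value(nm, current);   // idempotent round trip
  if (bs_nm->is_armed(nm)) {             // guard_value(nm) != disarmed_guard_value()
    bs_nm->disarm(nm);                   // set_guard_value(nm, disarmed_guard_value())
  }
}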


@@ -104,7 +104,7 @@ void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
BarrierSetNMethod* bs_nm = barrier_set_nmethod();
if (bs_nm != NULL) {
thread->set_nmethod_disarm_value(bs_nm->disarmed_value());
thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());
}
if (ShenandoahStackWatermarkBarrier) {


@@ -210,7 +210,7 @@ public:
ShenandoahNMethod::heal_nmethod_metadata(nm_data);
// Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
// mark_as_maybe_on_stack() callbacks when they are used again.
_bs->arm(nm, 0);
_bs->set_guard_value(nm, 0);
}
// Clear compiled ICs and exception caches


@@ -68,10 +68,10 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
return true;
}
int* ZBarrierSetNMethod::disarmed_value_address() const {
int* ZBarrierSetNMethod::disarmed_guard_value_address() const {
return (int*)ZAddressBadMaskHighOrderBitsAddr;
}
ByteSize ZBarrierSetNMethod::thread_disarmed_offset() const {
ByteSize ZBarrierSetNMethod::thread_disarmed_guard_value_offset() const {
return ZThreadLocalData::nmethod_disarmed_offset();
}


@@ -34,8 +34,8 @@ protected:
virtual bool nmethod_entry_barrier(nmethod* nm);
public:
virtual ByteSize thread_disarmed_offset() const;
virtual int* disarmed_value_address() const;
virtual ByteSize thread_disarmed_guard_value_offset() const;
virtual int* disarmed_guard_value_address() const;
};
#endif // SHARE_GC_Z_ZBARRIERSETNMETHOD_HPP


@@ -197,11 +197,9 @@ void ZNMethod::disarm(nmethod* nm) {
bs->disarm(nm);
}
void ZNMethod::arm(nmethod* nm, int arm_value) {
void ZNMethod::set_guard_value(nmethod* nm, int value) {
BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs != NULL) {
bs->arm(nm, arm_value);
}
bs->set_guard_value(nm, value);
}
void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
@@ -300,9 +298,9 @@ public:
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (ZNMethod::is_armed(nm)) {
// Heal oops and disarm
// Heal oops and arm phase invariantly
ZNMethod::nmethod_oops_barrier(nm);
ZNMethod::arm(nm, 0);
ZNMethod::set_guard_value(nm, 0);
}
// Clear compiled ICs and exception caches
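Why a guard of 0 arms "phase invariantly": ZGC derives its disarmed value from the bad-mask high-order bits (see disarmed_guard_value_address() in the ZBarrierSetNMethod hunk above), which are non-zero in every phase, so a 0 guard can never compare equal in the entry barrier. As a one-line invariant (an assumption about the mask, not code from this change):

// Holds as long as the ZGC bad mask always has a high-order bit set.
bool stays_armed(int current_disarmed_guard_value) {
  return 0 != current_disarmed_guard_value;  // guard 0 never matches => always armed
}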


@@ -46,7 +46,7 @@ public:
static bool is_armed(nmethod* nm);
static void disarm(nmethod* nm);
static void arm(nmethod* nm, int arm_value);
static void set_guard_value(nmethod* nm, int value);
static void nmethod_oops_do(nmethod* nm, OopClosure* cl);
static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl);


@@ -115,16 +115,16 @@ class Thread: public ThreadShadow {
// On AArch64, the high order 32 bits are used by a "patching epoch" number
// which reflects if this thread has executed the required fences, after
// an nmethod gets disarmed. The low order 32 bit denote the disarm value.
uint64_t _nmethod_disarm_value;
// an nmethod gets disarmed. The low order 32 bits denote the disarmed value.
uint64_t _nmethod_disarmed_guard_value;
public:
void set_nmethod_disarm_value(int value) {
_nmethod_disarm_value = (uint64_t)(uint32_t)value;
void set_nmethod_disarmed_guard_value(int value) {
_nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
}
static ByteSize nmethod_disarmed_offset() {
ByteSize offset = byte_offset_of(Thread, _nmethod_disarm_value);
static ByteSize nmethod_disarmed_guard_value_offset() {
ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
// At least on x86_64, nmethod entry barrier encodes disarmed value offset
// in instruction as disp8 immed
assert(in_bytes(offset) < 128, "Offset >= 128");
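The double cast in the setter above matters: casting int straight to uint64_t would sign-extend a negative guard into the epoch half of the word. A runnable check of that detail:

#include <cassert>
#include <cstdint>

int main() {
  int value = -1;
  uint64_t sign_extended = (uint64_t)value;            // 0xFFFFFFFFFFFFFFFF
  uint64_t zero_extended = (uint64_t)(uint32_t)value;  // 0x00000000FFFFFFFF
  assert(zero_extended >> 32 == 0);                    // epoch half stays clear
  assert(sign_extended != zero_extended);
  return 0;
}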