8329597: C2: Intrinsify Reference.clear

Reviewed-by: rcastanedalo, eosterlund, kvn
Author: Aleksey Shipilev
Date:   2024-10-16 14:08:10 +00:00
parent 1cc32237ae
commit 7625b29920
26 changed files with 362 additions and 21 deletions
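For orientation: Reference.clear() bottoms out in clear0(), which before this change was always a downcall to the native JVM_ReferenceClear; the change lets C2 expand clear0() inline instead. A minimal Java sketch (not part of the diff) of the operation being intrinsified:

    import java.lang.ref.WeakReference;

    public class ClearExample {
        public static void main(String[] args) {
            Object referent = new Object();
            WeakReference<Object> ref = new WeakReference<>(referent);

            // With this change, C2 compiles clear0() down to a null store
            // of the referent field, decorated AS_NO_KEEPALIVE so that
            // collectors may elide keep-alive (SATB) barriers.
            ref.clear();

            System.out.println(ref.get()); // prints "null"
        }
    }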


@@ -1189,6 +1189,8 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
} else if (stub->is_atomic()) {
__ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
} else if (stub->is_nokeepalive()) {
__ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr()));
} else {
__ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
}
@@ -1307,11 +1309,11 @@ Label* ZLoadBarrierStubC2Aarch64::entry() {
return ZBarrierStubC2::entry();
}
ZStoreBarrierStubC2Aarch64::ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic)
: ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic), _deferred_emit(false) {}
ZStoreBarrierStubC2Aarch64::ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive)
: ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic, is_nokeepalive), _deferred_emit(false) {}
ZStoreBarrierStubC2Aarch64* ZStoreBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) {
ZStoreBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2Aarch64(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic);
ZStoreBarrierStubC2Aarch64* ZStoreBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive) {
ZStoreBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2Aarch64(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic, is_nokeepalive);
register_stub(stub);
return stub;
}


@@ -280,10 +280,10 @@ class ZStoreBarrierStubC2Aarch64 : public ZStoreBarrierStubC2 {
private:
bool _deferred_emit;
ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive);
public:
static ZStoreBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
static ZStoreBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive);
virtual void emit_code(MacroAssembler& masm);
};


@@ -91,7 +91,8 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
z_color(masm, node, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
bool is_nokeepalive = (node->barrier_data() & ZBarrierNoKeepalive) != 0;
ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic, is_nokeepalive);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}


@@ -943,6 +943,8 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr(), R3_ARG1);
} else if (stub->is_atomic()) {
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), R3_ARG1);
} else if (stub->is_nokeepalive()) {
__ call_VM_leaf(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr(), R3_ARG1);
} else {
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), R3_ARG1);
}


@@ -83,7 +83,8 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Register
z_color(masm, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic);
bool is_nokeepalive = (node->barrier_data() & ZBarrierNoKeepalive) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic, is_nokeepalive);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}


@@ -761,6 +761,8 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
} else if (stub->is_atomic()) {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
} else if (stub->is_nokeepalive()) {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr()));
} else {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
}


@@ -82,7 +82,8 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
z_color(masm, node, rnew_zpointer, rnew_zaddress, tmp);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
bool is_nokeepalive = (node->barrier_data() & ZBarrierNoKeepalive) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic, is_nokeepalive);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}


@@ -1260,6 +1260,8 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
} else if (stub->is_atomic()) {
__ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
} else if (stub->is_nokeepalive()) {
__ call(RuntimeAddress(ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr()));
} else {
__ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
}


@@ -91,7 +91,8 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
}
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
bool is_nokeepalive = (node->barrier_data() & ZBarrierNoKeepalive) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic, is_nokeepalive);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}


@@ -469,6 +469,8 @@ class methodHandle;
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
do_intrinsic(_Reference_refersTo0, java_lang_ref_Reference, refersTo0_name, object_boolean_signature, F_RN) \
do_intrinsic(_PhantomReference_refersTo0, java_lang_ref_PhantomReference, refersTo0_name, object_boolean_signature, F_RN) \
do_intrinsic(_Reference_clear0, java_lang_ref_Reference, clear0_name, void_method_signature, F_RN) \
do_intrinsic(_PhantomReference_clear0, java_lang_ref_PhantomReference, clear0_name, void_method_signature, F_RN) \
\
/* support for com.sun.crypto.provider.AESCrypt and some of its callers */ \
do_class(com_sun_crypto_provider_aescrypt, "com/sun/crypto/provider/AESCrypt") \


@@ -426,6 +426,7 @@ class SerializeClosure;
template(cs_name, "cs") \
template(get_name, "get") \
template(refersTo0_name, "refersTo0") \
template(clear0_name, "clear0") \
template(put_name, "put") \
template(type_name, "type") \
template(findNative_name, "findNative") \


@@ -327,6 +327,7 @@ Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) co
bool in_heap = (decorators & IN_HEAP) != 0;
bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;
bool need_store_barrier = !(tightly_coupled_alloc && use_ReduceInitialCardMarks()) && (in_heap || anonymous);
bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
if (access.is_oop() && need_store_barrier) {
access.set_barrier_data(get_store_barrier(access));
if (tightly_coupled_alloc) {
@@ -336,6 +337,10 @@ Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) co
access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
}
}
if (no_keepalive) {
// No keep-alive means no need for the pre-barrier.
access.set_barrier_data(access.barrier_data() & ~G1C2BarrierPre);
}
return BarrierSetC2::store_at_resolved(access, val);
}
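The reasoning in this hunk, and in the Shenandoah hunk below, is the same: the SATB pre-barrier exists only to keep the overwritten value alive under concurrent marking, which is exactly what an AS_NO_KEEPALIVE store does not want. A conceptual sketch in Java-flavored pseudocode (markingActive and satbEnqueue are illustrative names, not HotSpot APIs):

    // Conceptual shape of an SATB store barrier; illustrative only.
    final class SatbSketch {
        static volatile boolean markingActive; // hypothetical marking-phase flag

        static void store(Object[] holder, int i, Object newVal, boolean noKeepalive) {
            Object oldVal = holder[i];
            // Pre-barrier: log the old value so concurrent marking keeps it
            // alive. For no-keepalive stores such as Reference.clear0(),
            // letting the old referent die is the point, so it is elided.
            if (!noKeepalive && markingActive && oldVal != null) {
                satbEnqueue(oldVal);
            }
            holder[i] = newVal; // the actual store
        }

        static void satbEnqueue(Object o) { /* hypothetical SATB queue */ }
    }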


@@ -480,10 +480,17 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
const TypePtr* adr_type = access.addr().type();
Node* adr = access.addr().node();
bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
if (!access.is_oop()) {
return BarrierSetC2::store_at_resolved(access, val);
}
if (no_keepalive) {
// No keep-alive means no need for the pre-barrier.
return BarrierSetC2::store_at_resolved(access, val);
}
if (access.is_parse_access()) {
C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
GraphKit* kit = parse_access.kit();


@@ -239,21 +239,23 @@ void ZLoadBarrierStubC2::emit_code(MacroAssembler& masm) {
ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, static_cast<ZLoadBarrierStubC2*>(this));
}
ZStoreBarrierStubC2* ZStoreBarrierStubC2::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) {
ZStoreBarrierStubC2* ZStoreBarrierStubC2::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive) {
AARCH64_ONLY(fatal("Should use ZStoreBarrierStubC2Aarch64::create"));
ZStoreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic);
ZStoreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic, is_nokeepalive);
register_stub(stub);
return stub;
}
ZStoreBarrierStubC2::ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic)
ZStoreBarrierStubC2::ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer,
bool is_native, bool is_atomic, bool is_nokeepalive)
: ZBarrierStubC2(node),
_ref_addr(ref_addr),
_new_zaddress(new_zaddress),
_new_zpointer(new_zpointer),
_is_native(is_native),
_is_atomic(is_atomic) {}
_is_atomic(is_atomic),
_is_nokeepalive(is_nokeepalive) {}
Address ZStoreBarrierStubC2::ref_addr() const {
return _ref_addr;
@@ -275,6 +277,10 @@ bool ZStoreBarrierStubC2::is_atomic() const {
return _is_atomic;
}
bool ZStoreBarrierStubC2::is_nokeepalive() const {
return _is_nokeepalive;
}
void ZStoreBarrierStubC2::emit_code(MacroAssembler& masm) {
ZBarrierSet::assembler()->generate_c2_store_barrier_stub(&masm, static_cast<ZStoreBarrierStubC2*>(this));
}


@@ -79,18 +79,20 @@ private:
const Register _new_zpointer;
const bool _is_native;
const bool _is_atomic;
const bool _is_nokeepalive;
protected:
ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive);
public:
static ZStoreBarrierStubC2* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
static ZStoreBarrierStubC2* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic, bool is_nokeepalive);
Address ref_addr() const;
Register new_zaddress() const;
Register new_zpointer() const;
bool is_native() const;
bool is_atomic() const;
bool is_nokeepalive() const;
virtual void emit_code(MacroAssembler& masm);
};


@@ -59,6 +59,10 @@ JRT_LEAF(void, ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing(oo
ZBarrier::store_barrier_on_heap_oop_field((zpointer*)p, false /* heal */);
JRT_END
JRT_LEAF(void, ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing(oop* p))
ZBarrier::no_keep_alive_store_barrier_on_heap_oop_field((zpointer*)p);
JRT_END
JRT_LEAF(void, ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing(oop* p))
ZBarrier::store_barrier_on_native_oop_field((zpointer*)p, false /* heal */);
JRT_END
@@ -126,6 +130,10 @@ address ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr() {
return reinterpret_cast<address>(store_barrier_on_oop_field_without_healing);
}
address ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing_addr() {
return reinterpret_cast<address>(no_keepalive_store_barrier_on_oop_field_without_healing);
}
address ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr() {
return reinterpret_cast<address>(store_barrier_on_native_oop_field_without_healing);
}


@@ -40,6 +40,7 @@ private:
static oopDesc* no_keepalive_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p);
static void store_barrier_on_oop_field_with_healing(oop* p);
static void store_barrier_on_oop_field_without_healing(oop* p);
static void no_keepalive_store_barrier_on_oop_field_without_healing(oop* p);
static void store_barrier_on_native_oop_field_without_healing(oop* p);
static void load_barrier_on_oop_array(oop* p, size_t length);
static void clone(oopDesc* src, oopDesc* dst, size_t size);
@@ -54,6 +55,7 @@ public:
static address no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr();
static address store_barrier_on_oop_field_with_healing_addr();
static address store_barrier_on_oop_field_without_healing_addr();
static address no_keepalive_store_barrier_on_oop_field_without_healing_addr();
static address store_barrier_on_native_oop_field_without_healing_addr();
static address load_barrier_on_oop_array_addr();
static address clone_addr();


@@ -847,6 +847,7 @@
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::no_keepalive_store_barrier_on_oop_field_without_healing)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing)) \
ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing)) \


@@ -766,6 +766,8 @@ bool C2Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
case vmIntrinsics::_Reference_get:
case vmIntrinsics::_Reference_refersTo0:
case vmIntrinsics::_PhantomReference_refersTo0:
case vmIntrinsics::_Reference_clear0:
case vmIntrinsics::_PhantomReference_clear0:
case vmIntrinsics::_Class_cast:
case vmIntrinsics::_aescrypt_encryptBlock:
case vmIntrinsics::_aescrypt_decryptBlock:


@@ -580,6 +580,8 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_Reference_get: return inline_reference_get();
case vmIntrinsics::_Reference_refersTo0: return inline_reference_refersTo0(false);
case vmIntrinsics::_PhantomReference_refersTo0: return inline_reference_refersTo0(true);
case vmIntrinsics::_Reference_clear0: return inline_reference_clear0(false);
case vmIntrinsics::_PhantomReference_clear0: return inline_reference_clear0(true);
case vmIntrinsics::_Class_cast: return inline_Class_cast();
@@ -6962,6 +6964,48 @@ bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) {
return true;
}
//----------------------------inline_reference_clear0----------------------------
// void java.lang.ref.Reference.clear0();
// void java.lang.ref.PhantomReference.clear0();
bool LibraryCallKit::inline_reference_clear0(bool is_phantom) {
// This matches the implementation in JVM_ReferenceClear, see the comments there.
// Get arguments
Node* reference_obj = null_check_receiver();
if (stopped()) return true;
// Common access parameters
DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE;
decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF);
Node* referent_field_addr = basic_plus_adr(reference_obj, java_lang_ref_Reference::referent_offset());
const TypePtr* referent_field_addr_type = _gvn.type(referent_field_addr)->isa_ptr();
const Type* val_type = TypeOopPtr::make_from_klass(env()->Object_klass());
Node* referent = access_load_at(reference_obj,
referent_field_addr,
referent_field_addr_type,
val_type,
T_OBJECT,
decorators);
IdealKit ideal(this);
#define __ ideal.
__ if_then(referent, BoolTest::ne, null());
sync_kit(ideal);
access_store_at(reference_obj,
referent_field_addr,
referent_field_addr_type,
null(),
val_type,
T_OBJECT,
decorators);
__ sync_kit(this);
__ end_if();
final_sync(ideal);
#undef __
return true;
}
Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString,
DecoratorSet decorators, bool is_static,
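At the Java level, the IR built by inline_reference_clear0 above is equivalent to the following sketch; the real load and store are C2 access nodes decorated IN_HEAP | AS_NO_KEEPALIVE plus ON_WEAK_OOP_REF or ON_PHANTOM_OOP_REF, mirroring JVM_ReferenceClear:

    // Plain-Java sketch of the intrinsic's semantics.
    final class Clear0Sketch {
        private Object referent; // stands in for Reference.referent

        void clear0() {
            Object r = referent;  // no-keepalive load
            if (r != null) {      // the IdealKit if_then above
                referent = null;  // no-keepalive store
            }
        }
    }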


@@ -297,6 +297,7 @@ class LibraryCallKit : public GraphKit {
bool inline_divmod_methods(vmIntrinsics::ID id);
bool inline_reference_get();
bool inline_reference_refersTo0(bool is_phantom);
bool inline_reference_clear0(bool is_phantom);
bool inline_Class_cast();
bool inline_aescrypt_Block(vmIntrinsics::ID id);
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);


@@ -77,6 +77,19 @@ public non-sealed class PhantomReference<T> extends Reference<T> {
@IntrinsicCandidate
private native boolean refersTo0(Object o);
/* Override the implementation of Reference.clear.
* Phantom references are weaker than finalization, so the referent
* access needs to be handled differently for garbage collectors that
* do reference processing concurrently.
*/
@Override
void clearImpl() {
clear0();
}
@IntrinsicCandidate
private native void clear0();
/**
* Creates a new phantom reference that refers to the given object and
* is registered with the given queue.


@@ -403,13 +403,23 @@ public abstract sealed class Reference<T>
* necessary.
*/
public void clear() {
clearImpl();
}
/* Implementation of clear(). A simple assignment of the referent field
* won't do for some garbage collectors. There is the override for phantom
* references, which requires different semantics. This method is also
* used by enqueue().
*
* <p>This method exists only to avoid making clear0() virtual. Making
* clear0() virtual has the undesirable effect of C2 often preferring
* to call the native implementation over the intrinsic.
*/
void clearImpl() {
clear0();
}
/* Implementation of clear(), also used by enqueue(). A simple
* assignment of the referent field won't do for some garbage
* collectors.
*/
@IntrinsicCandidate
private native void clear0();
/* -- Operations on inactive FinalReferences -- */
@@ -511,7 +521,7 @@ public abstract sealed class Reference<T>
* it was not registered with a queue when it was created
*/
public boolean enqueue() {
clear0(); // Intentionally clear0() rather than clear()
clearImpl(); // Intentionally clearImpl() to dispatch to overridden method, if needed
return this.queue.enqueue(this);
}
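A toy model of the resulting dispatch shape: clearImpl() is the single virtual hop, while each clear0() stays private, so C2 sees a monomorphic, intrinsifiable callee in both classes. Sketch only, with the native methods replaced by stub bodies:

    abstract class RefSketch {
        public void clear() { clearImpl(); }
        void clearImpl() { clear0(); }          // the one virtual hop
        private void clear0() { /* Reference.clear0 intrinsic */ }
    }

    final class PhantomSketch extends RefSketch {
        @Override
        void clearImpl() { clear0(); }          // binds to PhantomSketch.clear0
        private void clear0() { /* PhantomReference.clear0 intrinsic */ }
    }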


@@ -31,3 +31,9 @@ Java_java_lang_ref_PhantomReference_refersTo0(JNIEnv *env, jobject ref, jobject
{
return JVM_PhantomReferenceRefersTo(env, ref, o);
}
JNIEXPORT void JNICALL
Java_java_lang_ref_PhantomReference_clear0(JNIEnv *env, jobject ref)
{
JVM_ReferenceClear(env, ref);
}


@@ -0,0 +1,138 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package compiler.c2.irTests.gc;
import jdk.test.lib.Asserts;
import compiler.lib.ir_framework.*;
import jdk.test.whitebox.gc.GC;
import java.lang.ref.*;
import java.util.*;
/*
* @test
* @bug 8329597
* @summary Test that Reference.clear intrinsics are properly handled
* @library /test/lib /
* @build jdk.test.whitebox.WhiteBox
* @requires vm.compiler2.enabled
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI compiler.c2.irTests.gc.ReferenceClearTests
*/
public class ReferenceClearTests {
private static String[] args(String... add) {
List<String> args = new ArrayList<>();
// Use PerMethodTrapLimit=0 to compile all branches in the intrinsics.
args.add("-XX:PerMethodTrapLimit=0");
// Forcefully inline all methods to reach the intrinsic code.
args.add("-XX:CompileCommand=inline,compiler.c2.irTests.gc.ReferenceClearTests::*");
args.add("-XX:CompileCommand=inline,java.lang.ref.Reference::*");
args.add("-XX:CompileCommand=inline,java.lang.ref.PhantomReference::*");
// Mix in test config code.
args.addAll(Arrays.asList(add));
return args.toArray(new String[0]);
}
public static void main(String[] args) {
TestFramework framework = new TestFramework();
int idx = 0;
if (GC.isSelectedErgonomically() && GC.Serial.isSupported()) {
// Serial does not have SATB/keep-alive barriers at all.
// There are inter-generational barriers on stores, but they are
// folded away for null stores like clear().
framework.addScenarios(new Scenario(idx++, args(
"-XX:+UseSerialGC"
)));
}
if (GC.isSelectedErgonomically() && GC.Parallel.isSupported()) {
// Parallel does not have SATB/keep-alive barriers at all.
// There are inter-generational barriers on stores, but they
// should be folded away for null stores like clear().
framework.addScenarios(new Scenario(idx++, args(
"-XX:+UseParallelGC"
)));
}
if (GC.isSelectedErgonomically() && GC.G1.isSupported()) {
// G1 does not have barriers in C2 IR.
framework.addScenarios(new Scenario(idx++, args(
"-XX:+UseG1GC"
)));
}
if (GC.isSelectedErgonomically() && GC.Shenandoah.isSupported()) {
// Shenandoah has SATB/keep-alive barriers, but they should not be
// present for clear()-s. There are load-reference barriers, which would
// confuse the tests, so we enable only SATB barriers.
framework.addScenarios(new Scenario(idx++, args(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:ShenandoahGCMode=passive",
"-XX:+ShenandoahSATBBarrier",
"-XX:+UseShenandoahGC"
)));
}
if (GC.isSelectedErgonomically() && GC.Z.isSupported()) {
// Z does not have barriers in C2 IR.
framework.addScenarios(new Scenario(idx++, args(
"-XX:+UseZGC"
)));
}
framework.start();
}
static final Object REF = new Object();
static final SoftReference<Object> SR = new SoftReference<>(REF);
static final WeakReference<Object> WR = new WeakReference<>(REF);
static final PhantomReference<Object> PR = new PhantomReference<>(REF, null);
// We assert there is only a single load and a single store of Reference.referent.
// This serves as a signal that no GC barriers are emitted in the IR.
@Test
@IR(counts = { IRNode.STORE, "1",
IRNode.LOAD, "1" })
public void soft() {
SR.clear();
}
@Test
@IR(counts = { IRNode.STORE, "1",
IRNode.LOAD, "1" })
public void weak() {
WR.clear();
}
@Test
@IR(counts = { IRNode.STORE, "1",
IRNode.LOAD, "1" })
public void phantom() {
PR.clear();
}
}


@@ -0,0 +1,81 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.openjdk.bench.java.lang.ref;
import java.lang.ref.*;
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.infra.Blackhole;
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
@BenchmarkMode(Mode.AverageTime)
@Warmup(iterations = 3, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 3, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
public class ReferenceClear {
final Reference<Object> soft = new SoftReference<>(new Object());
final Reference<Object> weak = new WeakReference<>(new Object());
final Reference<Object> phantom = new PhantomReference<>(new Object(), null);
@Benchmark
public void soft() {
soft.clear();
}
@Benchmark
public void soft_new(Blackhole bh) {
Object ref = new Object();
bh.consume(ref);
Reference<Object> soft = new SoftReference<>(ref);
soft.clear();
}
@Benchmark
public void weak() {
weak.clear();
}
@Benchmark
public void weak_new(Blackhole bh) {
Object ref = new Object();
bh.consume(ref);
Reference<Object> weak = new WeakReference<>(ref);
weak.clear();
}
@Benchmark
public void phantom() {
phantom.clear();
}
@Benchmark
public void phantom_new(Blackhole bh) {
Object ref = new Object();
bh.consume(ref);
Reference<Object> phantom = new PhantomReference<>(ref, null);
phantom.clear();
}
}
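To run these cases outside the jtreg micro harness, a standard JMH runner main works; a sketch assuming JMH and the compiled benchmark are on the classpath:

    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    public class RunReferenceClear {
        public static void main(String[] args) throws RunnerException {
            Options opts = new OptionsBuilder()
                    .include("org.openjdk.bench.java.lang.ref.ReferenceClear")
                    .build();
            new Runner(opts).run();
        }
    }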