commit 94eb25a4f1 (parent 4338f527aa)
Author: Coleen Phillimore
Date:   2022-11-03 17:27:00 +00:00

8256072: Eliminate JVMTI tagmap rehashing

Reviewed-by: kbarrett, eosterlund

18 changed files with 19 additions and 119 deletions
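
The heart of the change: the tag map used to key entries on a hash of the object's address (see the removed CollectedHeap::hash_oop below), which a moving collector invalidates at every relocation, so each GC pause had to schedule a rehash. Keying on the object's identity hash instead gives a value that is installed once and travels with the object, so rehashing is never needed. A minimal standalone sketch of the difference, with illustrative types and names (not HotSpot code):

#include <cstdint>
#include <cstdio>

struct Object { long payload; };

// Old scheme: the bucket hash is derived from the current address
// (compare the removed CollectedHeap::hash_oop). Moving the object
// changes the hash, so the table must be rehashed after relocation.
static uint32_t address_hash(const Object* obj) {
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(obj) >> 3);
}

// New scheme: a hash computed once and stored with the object (HotSpot
// keeps it in the mark word). Relocation copies it, so it never changes.
struct HashedObject {
  long payload;
  uint32_t identity_hash;  // installed lazily, stable thereafter
};

int main() {
  Object a{42};
  Object b = a;  // stand-in for the collector moving the object
  std::printf("address hash: %u -> %u (differs, rehash required)\n",
              address_hash(&a), address_hash(&b));

  HashedObject h{42, 0x1234};
  HashedObject moved = h;  // the stored hash moves with the object
  std::printf("identity hash: %u -> %u (stable, no rehash)\n",
              h.identity_hash, moved.identity_hash);
  return 0;
}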

@@ -652,11 +652,6 @@ bool CollectedHeap::is_archived_object(oop object) const {
   return false;
 }
 
-uint32_t CollectedHeap::hash_oop(oop obj) const {
-  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
-  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
-}
-
 // It's the caller's responsibility to ensure glitch-freedom
 // (if required).
 void CollectedHeap::update_capacity_and_used_at_gc() {

@@ -280,8 +280,6 @@ class CollectedHeap : public CHeapObj<mtGC> {
   DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })
 
-  virtual uint32_t hash_oop(oop obj) const;
-
   void set_gc_cause(GCCause::Cause v);
   GCCause::Cause gc_cause() { return _gc_cause; }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,13 +48,6 @@ void notify_jvmti_tagmaps() {
   // the tagmap's oopstorage notification handler to not care whether it's
   // invoked by STW or concurrent reference processing.
   JvmtiTagMap::set_needs_cleaning();
-
-  // Notify JVMTI tagmaps that a STW collection may have moved objects, so
-  // the tagmaps need rehashing. This isn't the right place for this, but
-  // is convenient because all the STW collectors use WeakProcessor. One
-  // problem is that the end of a G1 concurrent collection also comes here,
-  // possibly triggering unnecessary rehashes.
-  JvmtiTagMap::set_needs_rehashing();
 #endif // INCLUDE_JVMTI
 }

@@ -606,9 +606,6 @@ void ShenandoahConcurrentGC::op_final_mark() {
     ShenandoahCodeRoots::arm_nmethods();
     ShenandoahStackWatermark::change_epoch_id();
 
-    // Notify JVMTI that oops are changed.
-    JvmtiTagMap::set_needs_rehashing();
-
     if (ShenandoahPacing) {
       heap->pacer()->setup_for_evac();
     }

@@ -145,10 +145,6 @@ bool ZCollectedHeap::requires_barriers(stackChunkOop obj) const {
   return false;
 }
 
-uint32_t ZCollectedHeap::hash_oop(oop obj) const {
-  return _heap.hash_oop(ZOop::to_address(obj));
-}
-
 HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
   const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
   const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

@@ -74,8 +74,6 @@ public:
   virtual bool is_in(const void* p) const;
   virtual bool requires_barriers(stackChunkOop obj) const;
 
-  virtual uint32_t hash_oop(oop obj) const;
-
   virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,

@@ -436,9 +436,6 @@ void ZHeap::relocate_start() {
 
   // Update statistics
   ZStatHeap::set_at_relocate_start(_page_allocator.stats());
-
-  // Notify JVMTI
-  JvmtiTagMap::set_needs_rehashing();
 }
 
 void ZHeap::relocate() {

@@ -90,7 +90,6 @@ public:
   size_t unsafe_max_tlab_alloc() const;
 
   bool is_in(uintptr_t addr) const;
-  uint32_t hash_oop(uintptr_t addr) const;
 
   // Threads
   uint active_workers() const;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zForwardingTable.inline.hpp"
-#include "gc/z/zHash.inline.hpp"
 #include "gc/z/zMark.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageTable.inline.hpp"
@@ -43,11 +42,6 @@ inline ReferenceDiscoverer* ZHeap::reference_discoverer() {
   return &_reference_processor;
 }
 
-inline uint32_t ZHeap::hash_oop(uintptr_t addr) const {
-  const uintptr_t offset = ZAddress::offset(addr);
-  return ZHash::address_to_uint32(offset);
-}
-
 inline bool ZHeap::is_object_live(uintptr_t addr) const {
   ZPage* page = _page_table.get(addr);
   return page->is_object_live(addr);

@@ -99,10 +99,7 @@ void oopDesc::verify(oopDesc* oop_desc) {
 intptr_t oopDesc::slow_identity_hash() {
   // slow case; we have to acquire the micro lock in order to locate the header
   Thread* current = Thread::current();
-  ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
-  HandleMark hm(current);
-  Handle object(current, this);
-  return ObjectSynchronizer::identity_hash_value_for(object);
+  return ObjectSynchronizer::FastHashCode(current, this);
 }
 
 // used only for asserts and guarantees

@@ -293,6 +293,7 @@ class oopDesc {
   // identity hash; returns the identity hash key (computes it if necessary)
   inline intptr_t identity_hash();
   intptr_t slow_identity_hash();
+  inline bool fast_no_hash_check();
 
   // marks are forwarded to stack when object is locked
   inline bool has_displaced_mark() const;

@@ -356,6 +356,14 @@ intptr_t oopDesc::identity_hash() {
   }
 }
 
+// This checks fast simple case of whether the oop has_no_hash,
+// to optimize JVMTI table lookup.
+bool oopDesc::fast_no_hash_check() {
+  markWord mrk = mark_acquire();
+  assert(!mrk.is_marked(), "should never be marked");
+  return mrk.is_unlocked() && mrk.has_no_hash();
+}
+
 bool oopDesc::has_displaced_mark() const {
   return mark().has_displaced_mark_helper();
 }
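
The new fast path above reads the mark word once: if the object is unlocked and no identity hash has ever been installed, it cannot be in any tag map, so the lookup can bail out before computing a hash. A toy model of that check (the real markWord bit layout differs; this only illustrates the logic):

#include <cstdint>
#include <cassert>

struct MarkWord {
  uintptr_t value;
  // Toy layout: low 2 bits = lock state, remaining bits = hash (0 = none).
  static constexpr uintptr_t unlocked_value = 0x1;
  bool is_unlocked() const { return (value & 0x3) == unlocked_value; }
  uintptr_t hash() const { return value >> 2; }
  bool has_no_hash() const { return hash() == 0; }
};

// Mirrors the logic of oopDesc::fast_no_hash_check() above: only in the
// simple unlocked case can the hash bits be read directly to prove that
// no identity hash was ever installed. A locked (displaced) header makes
// this return false, falling through to the normal probe.
bool fast_no_hash_check(MarkWord mrk) {
  return mrk.is_unlocked() && mrk.has_no_hash();
}

int main() {
  MarkWord fresh{MarkWord::unlocked_value};                   // unlocked, no hash
  MarkWord hashed{MarkWord::unlocked_value | (0x2bUL << 2)};  // hash installed
  assert(fast_no_hash_check(fresh));    // lookup may return NULL immediately
  assert(!fast_no_hash_check(hashed));  // must probe the table for real
  return 0;
}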

@@ -76,7 +76,6 @@ bool JvmtiTagMap::_has_object_free_events = false;
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
   _lock(Mutex::nosafepoint, "JvmtiTagMap_lock"),
-  _needs_rehashing(false),
   _needs_cleaning(false),
   _posting_events(false) {
@@ -136,7 +135,7 @@ bool JvmtiTagMap::is_empty() {
   return hashmap()->is_empty();
 }
 
-// This checks for posting and rehashing before operations that
+// This checks for posting before operations that use
 // this tagmap table.
 void JvmtiTagMap::check_hashmap(GrowableArray<jlong>* objects) {
   assert(is_locked(), "checking");
@@ -148,14 +147,9 @@ void JvmtiTagMap::check_hashmap(GrowableArray<jlong>* objects) {
       env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) {
     remove_dead_entries_locked(objects);
   }
-
-  if (_needs_rehashing) {
-    log_info(jvmti, table)("TagMap table needs rehashing");
-    hashmap()->rehash();
-    _needs_rehashing = false;
-  }
 }
 
-// This checks for posting and rehashing and is called from the heap walks.
+// This checks for posting and is called from the heap walks.
 void JvmtiTagMap::check_hashmaps_for_heapwalk(GrowableArray<jlong>* objects) {
   assert(SafepointSynchronize::is_at_safepoint(), "called from safepoints");
@@ -2932,21 +2926,6 @@ void JvmtiTagMap::follow_references(jint heap_filter,
   post_dead_objects(&dead_objects);
 }
 
-// Concurrent GC needs to call this in relocation pause, so after the objects are moved
-// and have their new addresses, the table can be rehashed.
-void JvmtiTagMap::set_needs_rehashing() {
-  assert(SafepointSynchronize::is_at_safepoint(), "called in gc pause");
-  assert(Thread::current()->is_VM_thread(), "should be the VM thread");
-
-  JvmtiEnvIterator it;
-  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
-    JvmtiTagMap* tag_map = env->tag_map_acquire();
-    if (tag_map != NULL) {
-      tag_map->_needs_rehashing = true;
-    }
-  }
-}
-
 // Verify gc_notification follows set_needs_cleaning.
 DEBUG_ONLY(static bool notified_needs_cleaning = false;)

@@ -40,7 +40,6 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
   JvmtiEnv* _env;             // the jvmti environment
   Monitor _lock;              // lock for this tag map
   JvmtiTagMapTable* _hashmap; // the hashmap for tags
-  bool _needs_rehashing;
   bool _needs_cleaning;
   bool _posting_events;
@@ -115,7 +114,6 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
   void post_dead_objects(GrowableArray<jlong>* const objects);
 
   static void check_hashmaps_for_heapwalk(GrowableArray<jlong>* objects);
-  static void set_needs_rehashing() NOT_JVMTI_RETURN;
   static void set_needs_cleaning() NOT_JVMTI_RETURN;
   static void gc_notification(size_t num_dead_entries) NOT_JVMTI_RETURN;

@@ -85,7 +85,7 @@ void JvmtiTagMapTable::free_entry(JvmtiTagMapEntry* entry) {
 unsigned int JvmtiTagMapTable::compute_hash(oop obj) {
   assert(obj != NULL, "obj is null");
-  return Universe::heap()->hash_oop(obj);
+  return obj->identity_hash();
 }
 
 JvmtiTagMapEntry* JvmtiTagMapTable::find(int index, unsigned int hash, oop obj) {
@@ -113,6 +113,10 @@ JvmtiTagMapEntry* JvmtiTagMapTable::find(int index, unsigned int hash, oop obj)
 }
 
 JvmtiTagMapEntry* JvmtiTagMapTable::find(oop obj) {
+  if (obj->fast_no_hash_check()) {
+    // Objects in the table all have a hashcode.
+    return NULL;
+  }
   unsigned int hash = compute_hash(obj);
   int index = hash_to_index(hash);
   return find(index, hash, obj);
@@ -220,48 +224,3 @@ void JvmtiTagMapTable::remove_dead_entries(GrowableArray<jlong>* objects) {
   log_info(jvmti, table) ("JvmtiTagMap entries counted %d removed %d",
                           oops_counted, oops_removed);
 }
-
-// Rehash oops in the table
-void JvmtiTagMapTable::rehash() {
-  ResourceMark rm;
-  GrowableArray<JvmtiTagMapEntry*> moved_entries;
-
-  int oops_counted = 0;
-  for (int i = 0; i < table_size(); ++i) {
-    JvmtiTagMapEntry** p = bucket_addr(i);
-    JvmtiTagMapEntry* entry = bucket(i);
-    while (entry != NULL) {
-      oops_counted++;
-      oop l = entry->object_no_keepalive();
-      if (l != NULL) {
-        // Check if oop has moved, ie its hashcode is different
-        // than the one entered in the table.
-        unsigned int new_hash = compute_hash(l);
-        if (entry->hash() != new_hash) {
-          *p = entry->next();
-          entry->set_hash(new_hash);
-          unlink_entry(entry);
-          moved_entries.push(entry);
-        } else {
-          p = entry->next_addr();
-        }
-      } else {
-        // Skip removed oops. They may still have to be posted.
-        p = entry->next_addr();
-      }
-      // get next entry
-      entry = *p;
-    }
-  }
-
-  int rehash_len = moved_entries.length();
-  // Now add back in the entries that were removed.
-  for (int i = 0; i < rehash_len; i++) {
-    JvmtiTagMapEntry* moved_entry = moved_entries.at(i);
-    int index = hash_to_index(moved_entry->hash());
-    Hashtable<WeakHandle, mtServiceability>::add_entry(index, moved_entry);
-  }
-
-  log_info(jvmti, table) ("JvmtiTagMap entries counted %d rehashed %d",
-                          oops_counted, rehash_len);
-}
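
The removed rehash() above walked every bucket, unlinked entries whose address-derived hash had changed, and re-added them. It can go away entirely because of the invariant stated in find() above: adding a tag goes through compute_hash(), now obj->identity_hash(), which installs a permanent hash in the object's header. Table entries can therefore never hash differently later, and an object whose header carries no hash can be rejected immediately. A simplified sketch of that invariant (illustrative names, not the HotSpot classes):

#include <cstdint>
#include <unordered_map>
#include <cassert>

struct Obj {
  uint32_t installed_hash = 0;   // 0 means "no identity hash yet"
  uint32_t identity_hash() {     // computed once, stable forever after
    static uint32_t next = 1;
    if (installed_hash == 0) installed_hash = next++;
    return installed_hash;
  }
  bool fast_no_hash_check() const { return installed_hash == 0; }
};

// Bucket placement uses the stored identity hash, never the address.
struct IdentityHash {
  size_t operator()(const Obj* o) const { return o->installed_hash; }
};

struct TagMapTable {
  std::unordered_map<Obj*, long, IdentityHash> table;

  void add(Obj* o, long tag) {
    o->identity_hash();          // inserting installs the hash as a side effect
    table[o] = tag;
  }

  long* find(Obj* o) {
    if (o->fast_no_hash_check()) {
      return nullptr;            // no hash installed => was never inserted
    }
    auto it = table.find(o);
    return it == table.end() ? nullptr : &it->second;
  }
};

int main() {
  TagMapTable tags;
  Obj tagged, untagged;
  tags.add(&tagged, 7);
  assert(*tags.find(&tagged) == 7);
  assert(tags.find(&untagged) == nullptr);  // early exit, no hash computed
  return 0;
}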

@@ -89,7 +89,6 @@ public:
   // Cleanup cleared entries and store dead object tags in objects array
   void remove_dead_entries(GrowableArray<jlong>* objects);
 
-  void rehash();
 
   void clear();
 };

@@ -978,13 +978,6 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
   }
 }
 
-// Deprecated -- use FastHashCode() instead.
-intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
-  return FastHashCode(Thread::current(), obj());
-}
-
 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                    Handle h_obj) {
   assert(current == JavaThread::current(), "Can only be called on current thread");

@@ -178,7 +178,6 @@ class ObjectSynchronizer : AllStatic {
   // Returns the identity hash value for an oop
   // NOTE: It may cause monitor inflation
-  static intptr_t identity_hash_value_for(Handle obj);
   static intptr_t FastHashCode(Thread* current, oop obj);
 
   // java.lang.Thread support