8248391: Unify handling of all OopStorage instances in weak root processing

OopStorage instances are now owned by their subsystems, and dead-entry notification is generalized through a per-storage callback.

Co-authored-by: Erik Osterlund <erik.osterlund@oracle.com>
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Reviewed-by: coleenp, tschatzl
parent 55af701cc3
commit a8d6a05ce8

(In the hunks below, a modified line appears as its old version immediately followed by the new version.)
Changed files:

src/hotspot/share
  classfile: classLoaderData.cpp, dictionary.cpp, protectionDomainCache.cpp, stringTable.cpp, stringTable.hpp, systemDictionary.cpp, systemDictionaryShared.cpp
  gc/g1
  gc/shared: oopStorage.cpp, oopStorage.hpp, oopStorageParState.hpp, oopStorageSet.cpp, oopStorageSet.hpp, oopStorageSetParState.hpp, oopStorageSetParState.inline.hpp, weakProcessor.cpp, weakProcessor.hpp, weakProcessor.inline.hpp
  gc/shenandoah: shenandoahClosures.hpp, shenandoahClosures.inline.hpp, shenandoahHeap.cpp, shenandoahPhaseTimings.hpp, shenandoahRootProcessor.hpp, shenandoahRootProcessor.inline.hpp, shenandoahRootVerifier.cpp
  gc/z
  jfr/leakprofiler/checkpoint
  jvmci
  memory
  prims
  runtime
test/hotspot
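The core of the change: each subsystem now creates and owns its OopStorage through OopStorageSet::create_strong()/create_weak(), and may register a NumDeadCallback through which the GC reports how many entries were found dead. A minimal sketch of that pattern, using a hypothetical MyWeakTable subsystem (the class name and its cleanup decision are illustrative, not part of this commit):

#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"

class MyWeakTable : public AllStatic {
  static OopStorage* _oop_storage;

  // Invoked by the GC after weak-root processing; num_dead is the number of
  // entries that were already NULL or got cleared during the iteration.
  static void gc_notification(size_t num_dead) {
    if (num_dead > 0) {
      // Decide whether to schedule cleanup/resizing, e.g. via the service thread.
    }
  }

 public:
  static void create_table() {
    // The subsystem creates and owns its storage (registered in the weak set)...
    _oop_storage = OopStorageSet::create_weak("MyWeakTable Weak");
    // ...and registers the callback; at most one callback per storage.
    _oop_storage->register_num_dead_callback(&gc_notification);
  }
};

OopStorage* MyWeakTable::_oop_storage = NULL;

StringTable follows exactly this shape in the hunks below, so the weak-root processors only need the generic should_report_num_dead()/report_num_dead() protocol instead of special-casing each table.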
src/hotspot/share/classfile/classLoaderData.cpp

@@ -488,7 +488,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {

void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
  if (loader_or_mirror() != NULL) {
    assert(_holder.is_null(), "never replace holders");
    _holder = WeakHandle(OopStorageSet::vm_weak(), loader_or_mirror);
    _holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
  }
}

@@ -655,7 +655,7 @@ ClassLoaderData::~ClassLoaderData() {
  ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

  // Release the WeakHandle
  _holder.release(OopStorageSet::vm_weak());
  _holder.release(Universe::vm_weak());

  // Release C heap allocated hashtable for all the packages.
  if (_packages != NULL) {
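The classfile changes are mechanical: handles that were allocated from an OopStorageSet accessor now use the storage owned by Universe, and the release must go to the same storage. A small sketch of that create/release symmetry with a hypothetical holder (mirroring ClassLoaderData::_holder above; HolderExample is illustrative only):

class HolderExample {
  WeakHandle _handle;

 public:
  void set(Handle obj) {
    assert(_handle.is_null(), "never replace holders");
    _handle = WeakHandle(Universe::vm_weak(), obj);   // allocate a slot in the VM weak storage
  }

  ~HolderExample() {
    _handle.release(Universe::vm_weak());             // free the slot in the same storage
  }
};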
src/hotspot/share/classfile/dictionary.cpp

@@ -407,14 +407,14 @@ oop SymbolPropertyEntry::method_type() const {
}

void SymbolPropertyEntry::set_method_type(oop p) {
  _method_type = OopHandle(OopStorageSet::vm_global(), p);
  _method_type = OopHandle(Universe::vm_global(), p);
}

void SymbolPropertyEntry::free_entry() {
  // decrement Symbol refcount here because hashtable doesn't.
  literal()->decrement_refcount();
  // Free OopHandle
  _method_type.release(OopStorageSet::vm_global());
  _method_type.release(Universe::vm_global());
}

SymbolPropertyTable::SymbolPropertyTable(int table_size)
src/hotspot/share/classfile/protectionDomainCache.cpp

@@ -94,7 +94,7 @@ void ProtectionDomainCacheTable::unlink() {
        LogStream ls(lt);
        ls.print_cr("protection domain unlinked at %d", i);
      }
      entry->literal().release(OopStorageSet::vm_weak());
      entry->literal().release(Universe::vm_weak());
      *p = entry->next();
      free_entry(entry);
    }

@@ -181,7 +181,7 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
    protection_domain->print_value_on(&ls);
    ls.cr();
  }
  WeakHandle w(OopStorageSet::vm_weak(), protection_domain);
  WeakHandle w(Universe::vm_weak(), protection_domain);
  ProtectionDomainCacheEntry* p = new_entry(hash, w);
  Hashtable<WeakHandle, mtClass>::add_entry(index, p);
  return p;
src/hotspot/share/classfile/stringTable.cpp

@@ -85,8 +85,7 @@ static StringTableHash* _local_table = NULL;

volatile bool StringTable::_has_work = false;
volatile bool StringTable::_needs_rehashing = false;

volatile size_t StringTable::_uncleaned_items_count = 0;
OopStorage* StringTable::_oop_storage;

static size_t _current_size = 0;
static volatile size_t _items_count = 0;

@@ -129,7 +128,7 @@ class StringTableConfig : public StackObj {
    return AllocateHeap(size, mtSymbol);
  }
  static void free_node(void* memory, Value const& value) {
    value.release(OopStorageSet::string_table_weak());
    value.release(StringTable::_oop_storage);
    FreeHeap(memory);
    StringTable::item_removed();
  }

@@ -211,30 +210,24 @@ void StringTable::create_table() {
  log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
                         _current_size, start_size_log_2);
  _local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
  _oop_storage = OopStorageSet::create_weak("StringTable Weak");
  _oop_storage->register_num_dead_callback(&gc_notification);
}

size_t StringTable::item_added() {
  return Atomic::add(&_items_count, (size_t)1);
}

size_t StringTable::add_items_to_clean(size_t ndead) {
  size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
  log_trace(stringtable)(
    "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
    _uncleaned_items_count, ndead, total);
  return total;
}

void StringTable::item_removed() {
  Atomic::add(&_items_count, (size_t)-1);
}

double StringTable::get_load_factor() {
  return (double)_items_count/_current_size;
  return double(_items_count)/double(_current_size);
}

double StringTable::get_dead_factor() {
  return (double)_uncleaned_items_count/_current_size;
double StringTable::get_dead_factor(size_t num_dead) {
  return double(num_dead)/double(_current_size);
}

size_t StringTable::table_size() {

@@ -243,7 +236,7 @@ size_t StringTable::table_size() {

void StringTable::trigger_concurrent_work() {
  MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  _has_work = true;
  Atomic::store(&_has_work, true);
  Service_lock->notify_all();
}

@@ -368,7 +361,7 @@ oop StringTable::do_intern(Handle string_or_null_h, const jchar* name,
  bool rehash_warning;
  do {
    // Callers have already looked up the String using the jchar* name, so just go to add.
    WeakHandle wh(OopStorageSet::string_table_weak(), string_h);
    WeakHandle wh(_oop_storage, string_h);
    // The hash table takes ownership of the WeakHandle, even if it's not inserted.
    if (_local_table->insert(THREAD, lookup, wh, &rehash_warning)) {
      update_needs_rehash(rehash_warning);

@@ -449,13 +442,15 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
  log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item);
}

void StringTable::check_concurrent_work() {
  if (_has_work) {
void StringTable::gc_notification(size_t num_dead) {
  log_trace(stringtable)("Uncleaned items:" SIZE_FORMAT, num_dead);

  if (has_work()) {
    return;
  }

  double load_factor = StringTable::get_load_factor();
  double dead_factor = StringTable::get_dead_factor();
  double dead_factor = StringTable::get_dead_factor(num_dead);
  // We should clean/resize if we have more dead than alive,
  // more items than preferred load factor or
  // more dead items than water mark.

@@ -468,8 +463,11 @@ void StringTable::check_concurrent_work() {
  }
}

bool StringTable::has_work() {
  return Atomic::load_acquire(&_has_work);
}

void StringTable::do_concurrent_work(JavaThread* jt) {
  _has_work = false;
  double load_factor = get_load_factor();
  log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor);
  // We prefer growing, since that also removes dead items

@@ -478,6 +476,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) {
  } else {
    clean_dead_entries(jt);
  }
  Atomic::release_store(&_has_work, false);
}

// Rehash
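StringTable now owns its storage and receives a single num_dead value from the GC through gc_notification(), replacing the old reset_dead_counter()/inc_dead_counter()/finish_dead_counter() protocol. A simplified sketch of the triggering decision; the threshold names and values here are illustrative, since the actual cutoffs sit in the elided middle of the hunk above:

// Simplified decision logic; the real thresholds are not shown in this hunk.
static void gc_notification_sketch(size_t num_dead, size_t items, size_t table_size) {
  double load_factor = double(items) / double(table_size);
  double dead_factor = double(num_dead) / double(table_size);

  const double preferred_load_factor = 2.0;   // illustrative value
  const double dead_water_mark       = 0.5;   // illustrative value

  // Clean/resize if there are more dead than live entries, the table is
  // overloaded, or the dead fraction passed the water mark.
  if (dead_factor > load_factor ||
      load_factor > preferred_load_factor ||
      dead_factor > dead_water_mark) {
    // trigger_concurrent_work(): set _has_work and notify the service thread,
    // which later calls do_concurrent_work() to grow or clean the table.
  }
}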
src/hotspot/share/classfile/stringTable.hpp

@@ -46,23 +46,26 @@ class StringTable : public CHeapObj<mtSymbol>{
  friend class StringTableCreateEntry;

  static volatile bool _has_work;
  static volatile size_t _uncleaned_items_count;

  // Set if one bucket is out of balance due to hash algorithm deficiency
  static volatile bool _needs_rehashing;

  static OopStorage* _oop_storage;

  static void grow(JavaThread* jt);
  static void clean_dead_entries(JavaThread* jt);

  static double get_load_factor();
  static double get_dead_factor();
  static double get_dead_factor(size_t num_dead);

  static void check_concurrent_work();
  // GC support

  // Callback for GC to notify of changes that might require cleaning or resize.
  static void gc_notification(size_t num_dead);
  static void trigger_concurrent_work();

  static size_t item_added();
  static void item_removed();
  static size_t add_items_to_clean(size_t ndead);

  static oop intern(Handle string_or_null_h, const jchar* name, int len, TRAPS);
  static oop do_intern(Handle string_or_null, const jchar* name, int len, uintx hash, TRAPS);

@@ -79,20 +82,7 @@ class StringTable : public CHeapObj<mtSymbol>{
  static void create_table();

  static void do_concurrent_work(JavaThread* jt);
  static bool has_work() { return _has_work; }

  // GC support

  // Must be called before a parallel walk where strings might die.
  static void reset_dead_counter() { _uncleaned_items_count = 0; }

  // After the parallel walk this method must be called to trigger
  // cleaning. Note it might trigger a resize instead.
  static void finish_dead_counter() { check_concurrent_work(); }

  // If GC uses ParState directly it should add the number of cleared
  // strings to this method.
  static void inc_dead_counter(size_t ndead) { add_items_to_clean(ndead); }
  static bool has_work();

  // Probing
  static oop lookup(Symbol* symbol);
src/hotspot/share/classfile/systemDictionary.cpp

@@ -176,7 +176,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
                         vmSymbols::void_classloader_signature(),
                         CHECK);

  _java_system_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
  _java_system_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());

  JavaCalls::call_static(&result,
                         class_loader_klass,

@@ -184,7 +184,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
                         vmSymbols::void_classloader_signature(),
                         CHECK);

  _java_platform_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
  _java_platform_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());
}

ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, bool create_mirror_cld) {

@@ -2030,7 +2030,7 @@ void SystemDictionary::initialize(TRAPS) {

  // Allocate private object used as system class loader lock
  oop lock_obj = oopFactory::new_intArray(0, CHECK);
  _system_loader_lock_obj = OopHandle(OopStorageSet::vm_global(), lock_obj);
  _system_loader_lock_obj = OopHandle(Universe::vm_global(), lock_obj);

  // Initialize basic classes
  resolve_well_known_classes(CHECK);
src/hotspot/share/classfile/systemDictionaryShared.cpp

@@ -1023,7 +1023,7 @@ void SystemDictionaryShared::allocate_shared_protection_domain_array(int size, T
  if (_shared_protection_domains.resolve() == NULL) {
    oop spd = oopFactory::new_objArray(
        SystemDictionary::ProtectionDomain_klass(), size, CHECK);
    _shared_protection_domains = OopHandle(OopStorageSet::vm_global(), spd);
    _shared_protection_domains = OopHandle(Universe::vm_global(), spd);
  }
}

@@ -1031,7 +1031,7 @@ void SystemDictionaryShared::allocate_shared_jar_url_array(int size, TRAPS) {
  if (_shared_jar_urls.resolve() == NULL) {
    oop sju = oopFactory::new_objArray(
        SystemDictionary::URL_klass(), size, CHECK);
    _shared_jar_urls = OopHandle(OopStorageSet::vm_global(), sju);
    _shared_jar_urls = OopHandle(Universe::vm_global(), sju);
  }
}

@@ -1039,7 +1039,7 @@ void SystemDictionaryShared::allocate_shared_jar_manifest_array(int size, TRAPS)
  if (_shared_jar_manifests.resolve() == NULL) {
    oop sjm = oopFactory::new_objArray(
        SystemDictionary::Jar_Manifest_klass(), size, CHECK);
    _shared_jar_manifests = OopHandle(OopStorageSet::vm_global(), sjm);
    _shared_jar_manifests = OopHandle(Universe::vm_global(), sjm);
  }
}
src/hotspot/share/gc/g1/g1Policy.cpp

@@ -77,7 +77,8 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
  _concurrent_start_to_mixed(),
  _collection_set(NULL),
  _g1h(NULL),
  _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  _phase_times_timer(gc_timer),
  _phase_times(NULL),
  _mark_remark_start_sec(0),
  _mark_cleanup_start_sec(0),
  _tenuring_threshold(MaxTenuringThreshold),

@@ -401,6 +402,15 @@ double G1Policy::predict_survivor_regions_evac_time() const {
  return survivor_regions_evac_time;
}

G1GCPhaseTimes* G1Policy::phase_times() const {
  // Lazy allocation because it must follow initialization of all the
  // OopStorage objects by various other subsystems.
  if (_phase_times == NULL) {
    _phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
  }
  return _phase_times;
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
  guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );

src/hotspot/share/gc/g1/g1Policy.hpp

@@ -182,7 +182,9 @@ private:
  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1h;

  G1GCPhaseTimes* _phase_times;
  STWGCTimer* _phase_times_timer;
  // Lazily initialized
  mutable G1GCPhaseTimes* _phase_times;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.

@@ -300,7 +302,7 @@ public:

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }
  G1GCPhaseTimes* phase_times() const;

  // Check the current value of the young list RSet length and
  // compare it against the last prediction. If the current value is
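G1 defers building G1GCPhaseTimes until first use, presumably because the phase table covers the weak-root storages and therefore has to be built after every subsystem has registered its OopStorage. The getter above is ordinary lazy initialization through a mutable pointer; a standalone sketch of the same idiom (the wrapper name is illustrative):

class LazyPhaseTimes {
  STWGCTimer* _timer;
  mutable G1GCPhaseTimes* _phase_times;  // created on first use, hence mutable

 public:
  explicit LazyPhaseTimes(STWGCTimer* timer) : _timer(timer), _phase_times(NULL) {}

  G1GCPhaseTimes* get() const {
    // Constructing this in the G1Policy initializer list would run before the
    // other subsystems have created their storages; first use happens later.
    if (_phase_times == NULL) {
      _phase_times = new G1GCPhaseTimes(_timer, ParallelGCThreads);
    }
    return _phase_times;
  }
};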
src/hotspot/share/gc/shared/oopStorage.cpp

@@ -746,6 +746,7 @@ OopStorage::OopStorage(const char* name) :
  _deferred_updates(NULL),
  _allocation_mutex(make_oopstorage_mutex(name, "alloc", Mutex::oopstorage)),
  _active_mutex(make_oopstorage_mutex(name, "active", Mutex::oopstorage - 1)),
  _num_dead_callback(NULL),
  _allocation_count(0),
  _concurrent_iteration_count(0),
  _needs_cleanup(false)

@@ -814,6 +815,21 @@ static jlong cleanup_trigger_permit_time = 0;
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;

void OopStorage::register_num_dead_callback(NumDeadCallback f) {
  assert(_num_dead_callback == NULL, "Only one callback function supported");
  _num_dead_callback = f;
}

void OopStorage::report_num_dead(size_t num_dead) const {
  if (_num_dead_callback != NULL) {
    _num_dead_callback(num_dead);
  }
}

bool OopStorage::should_report_num_dead() const {
  return _num_dead_callback != NULL;
}

void OopStorage::trigger_cleanup_if_needed() {
  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
  if (Atomic::load(&needs_cleanup_requested) &&

@@ -970,7 +986,8 @@ OopStorage::BasicParState::BasicParState(const OopStorage* storage,
  _block_count(0), // initialized properly below
  _next_block(0),
  _estimated_thread_count(estimated_thread_count),
  _concurrent(concurrent)
  _concurrent(concurrent),
  _num_dead(0)
{
  assert(estimated_thread_count > 0, "estimated thread count must be positive");
  update_concurrent_iteration_count(1);

@@ -1043,6 +1060,18 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
  return false;
}

size_t OopStorage::BasicParState::num_dead() const {
  return Atomic::load(&_num_dead);
}

void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
  Atomic::add(&_num_dead, num_dead);
}

void OopStorage::BasicParState::report_num_dead() const {
  _storage->report_num_dead(Atomic::load(&_num_dead));
}

const char* OopStorage::name() const { return _name; }

#ifndef PRODUCT
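On the GC side, BasicParState carries the new _num_dead accumulator: workers add what they clear or skip with increment_num_dead(), and one report_num_dead() call after the iteration forwards the total to the owning subsystem's callback. A hedged sketch of a serial pass over a single weak storage, reusing CountingSkippedIsAliveClosure from weakProcessor.inline.hpp (assumed to run inside the GC, e.g. at a safepoint):

void process_weak_storage(OopStorage* storage,
                          BoolObjectClosure* is_alive,
                          OopClosure* keep_alive) {
  OopStorage::ParState<false /* concurrent */, false /* is_const */> par_state(storage, 1);

  // The counting closure tallies entries that were already NULL (skipped) and
  // entries whose referent died during this pass.
  CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
  par_state.oops_do(&cl);
  par_state.increment_num_dead(cl.num_skipped() + cl.num_dead());

  // Hand the total to the subsystem that owns the storage (no-op without a callback).
  par_state.report_num_dead();
}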
src/hotspot/share/gc/shared/oopStorage.hpp

@@ -151,6 +151,24 @@ public:
  // Other clients must use serial iteration.
  template<bool concurrent, bool is_const> class ParState;

  // Support GC callbacks reporting dead entries. This lets clients respond
  // to entries being cleared.

  typedef void (*NumDeadCallback)(size_t num_dead);

  // Used by a client to register a callback function with the GC.
  // precondition: No more than one registration per storage object.
  void register_num_dead_callback(NumDeadCallback f);

  // Called by the GC after an iteration that may clear dead referents.
  // This calls the registered callback function, if any. num_dead is the
  // number of entries which were either already NULL or were cleared by the
  // iteration.
  void report_num_dead(size_t num_dead) const;

  // Used by the GC to test whether a callback function has been registered.
  bool should_report_num_dead() const;

  // Service thread cleanup support.

  // Called by the service thread to process any pending cleanups for this

@@ -167,7 +185,7 @@ public:
  // cleanups to process.
  static void trigger_cleanup_if_needed();

  // Called by the service thread (while holding Service_lock) to to test
  // Called by the service thread (while holding Service_lock) to test
  // for pending cleanup requests, and resets the request state to allow
  // recognition of new requests. Returns true if there was a pending
  // request.

@@ -222,6 +240,7 @@ private:
  Block* volatile _deferred_updates;
  Mutex* _allocation_mutex;
  Mutex* _active_mutex;
  NumDeadCallback _num_dead_callback;

  // Volatile for racy unlocked accesses.
  volatile size_t _allocation_count;
@ -133,6 +133,7 @@ class OopStorage::BasicParState {
|
||||
volatile size_t _next_block;
|
||||
uint _estimated_thread_count;
|
||||
bool _concurrent;
|
||||
volatile size_t _num_dead;
|
||||
|
||||
NONCOPYABLE(BasicParState);
|
||||
|
||||
@ -156,6 +157,10 @@ public:
|
||||
template<bool is_const, typename F> void iterate(F f);
|
||||
|
||||
static uint default_estimated_thread_count(bool concurrent);
|
||||
|
||||
size_t num_dead() const;
|
||||
void increment_num_dead(size_t num_dead);
|
||||
void report_num_dead() const;
|
||||
};
|
||||
|
||||
template<bool concurrent, bool is_const>
|
||||
@ -175,6 +180,10 @@ public:
|
||||
const OopStorage* storage() const { return _basic_state.storage(); }
|
||||
template<typename F> void iterate(F f);
|
||||
template<typename Closure> void oops_do(Closure* cl);
|
||||
|
||||
size_t num_dead() const { return _basic_state.num_dead(); }
|
||||
void increment_num_dead(size_t num_dead) { _basic_state.increment_num_dead(num_dead); }
|
||||
void report_num_dead() const { _basic_state.report_num_dead(); }
|
||||
};
|
||||
|
||||
template<>
|
||||
@ -193,6 +202,10 @@ public:
|
||||
template<typename Closure> void weak_oops_do(Closure* cl);
|
||||
template<typename IsAliveClosure, typename Closure>
|
||||
void weak_oops_do(IsAliveClosure* is_alive, Closure* cl);
|
||||
|
||||
size_t num_dead() const { return _basic_state.num_dead(); }
|
||||
void increment_num_dead(size_t num_dead) { _basic_state.increment_num_dead(num_dead); }
|
||||
void report_num_dead() const { _basic_state.report_num_dead(); }
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_SHARED_OOPSTORAGEPARSTATE_HPP
|
||||
|
src/hotspot/share/gc/shared/oopStorageSet.cpp

@@ -32,27 +32,39 @@
// +1 for NULL singular entry.
OopStorage* OopStorageSet::storages[all_count + 1] = {};

void OopStorageSet::initialize() {
  storages[jni_global_index] = new OopStorage("JNI Global");
  storages[vm_global_index] = new OopStorage("VM Global");
  storages[jni_weak_index] = new OopStorage("JNI Weak");
  storages[vm_weak_index] = new OopStorage("VM Weak");
  storages[string_table_weak_index] = new OopStorage("StringTable Weak");
  storages[resolved_method_table_weak_index] =
    new OopStorage("ResolvedMethodTable Weak");

  // Ensure we have all of them.
  STATIC_ASSERT(all_count == 6);
  assert(storages[singular_index] == NULL, "postcondition");
#ifdef ASSERT
  for (uint i = all_start; i < all_end; ++i) {
    assert(storages[i] != NULL, "postcondition");
  }
#endif // ASSERT
OopStorage* OopStorageSet::create_strong(const char* name) {
  static uint registered_strong = 0;
  assert(registered_strong < strong_count, "More registered strong storages than slots");
  OopStorage* storage = new OopStorage(name);
  storages[strong_start + registered_strong++] = storage;
  return storage;
}

void oopstorage_init() {
  OopStorageSet::initialize();
OopStorage* OopStorageSet::create_weak(const char* name) {
  static uint registered_weak = 0;
  assert(registered_weak < weak_count, "More registered strong storages than slots");
  OopStorage* storage = new OopStorage(name);
  storages[weak_start + registered_weak++] = storage;
  return storage;
}

void OopStorageSet::fill_strong(OopStorage* to[strong_count]) {
  for (uint i = 0; i < OopStorageSet::strong_count; i++) {
    to[i] = storage(strong_start + i);
  }
}

void OopStorageSet::fill_weak(OopStorage* to[weak_count]) {
  for (uint i = 0; i < OopStorageSet::weak_count; i++) {
    to[i] = storage(weak_start + i);
  }
}

void OopStorageSet::fill_all(OopStorage* to[all_count]) {
  for (uint i = 0; i < OopStorageSet::all_count; i++) {
    to[i] = storage(all_start + i);
  }
}

#ifdef ASSERT
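Because every storage registers itself into a fixed slot, a GC no longer needs to know which subsystems exist; it just walks the set. The serial weak-root loop later in this patch (WeakProcessor::weak_oops_do) boils down to the following sketch:

// Serial sketch: visit every registered weak OopStorage and notify its owner.
void visit_all_weak_storages(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  for (OopStorageSet::Iterator it = OopStorageSet::weak_iterator(); !it.is_end(); ++it) {
    if (it->should_report_num_dead()) {
      // Owner registered a callback: count cleared/NULL entries and report them.
      CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
      it->oops_do(&cl);
      it->report_num_dead(cl.num_skipped() + cl.num_dead());
    } else {
      // No callback registered: plain weak processing is enough.
      it->weak_oops_do(is_alive, keep_alive);
    }
  }
}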
@ -33,30 +33,22 @@
|
||||
class OopStorage;
|
||||
|
||||
class OopStorageSet : public AllStatic {
|
||||
friend class OopStorageSetTest;
|
||||
|
||||
public:
|
||||
// Must be updated when new OopStorages are introduced
|
||||
static const uint strong_count = 2;
|
||||
static const uint weak_count = 4;
|
||||
static const uint all_count = strong_count + weak_count;
|
||||
|
||||
private:
|
||||
friend void oopstorage_init();
|
||||
|
||||
enum {
|
||||
singular_index, // For singular iterator.
|
||||
|
||||
all_start,
|
||||
|
||||
// Strong
|
||||
strong_start = all_start,
|
||||
jni_global_index = strong_start,
|
||||
vm_global_index,
|
||||
strong_end,
|
||||
|
||||
// Weak
|
||||
weak_start = strong_end,
|
||||
jni_weak_index = weak_start,
|
||||
vm_weak_index,
|
||||
string_table_weak_index,
|
||||
resolved_method_table_weak_index,
|
||||
weak_end,
|
||||
|
||||
all_end = weak_end
|
||||
};
|
||||
static const uint singular_index = 0; // For singular iterator.
|
||||
static const uint all_start = 1;
|
||||
static const uint strong_start = all_start;
|
||||
static const uint strong_end = strong_start + strong_count;
|
||||
static const uint weak_start = strong_end;
|
||||
static const uint weak_end = weak_start + weak_count;
|
||||
static const uint all_end = weak_end;
|
||||
|
||||
static OopStorage* storages[all_end];
|
||||
|
||||
@ -67,34 +59,20 @@ private:
|
||||
return storages[index];
|
||||
}
|
||||
|
||||
static void initialize();
|
||||
// Testing support
|
||||
static void fill_strong(OopStorage* storage[strong_count]);
|
||||
static void fill_weak(OopStorage* storage[weak_count]);
|
||||
static void fill_all(OopStorage* storage[all_count]);
|
||||
|
||||
public:
|
||||
class Iterator;
|
||||
|
||||
static const uint strong_count = (strong_end - strong_start);
|
||||
static const uint weak_count = (weak_end - weak_start);
|
||||
static const uint all_count = (all_end - all_start);
|
||||
|
||||
static Iterator strong_iterator();
|
||||
static Iterator weak_iterator();
|
||||
static Iterator all_iterator();
|
||||
|
||||
// Strong
|
||||
static OopStorage* jni_global() { return storage(jni_global_index); }
|
||||
static OopStorage* vm_global() { return storage(vm_global_index); }
|
||||
|
||||
// Weak
|
||||
static OopStorage* jni_weak() { return storage(jni_weak_index); }
|
||||
static OopStorage* vm_weak() { return storage(vm_weak_index); }
|
||||
|
||||
static OopStorage* string_table_weak() {
|
||||
return storage(string_table_weak_index);
|
||||
}
|
||||
|
||||
static OopStorage* resolved_method_table_weak() {
|
||||
return storage(resolved_method_table_weak_index);
|
||||
}
|
||||
static OopStorage* create_strong(const char* name);
|
||||
static OopStorage* create_weak(const char* name);
|
||||
|
||||
template <typename Closure>
|
||||
static void strong_oops_do(Closure* cl);
|
||||
|
@ -46,4 +46,23 @@ public:
|
||||
int par_state_count() const { return _par_states.count(); }
|
||||
};
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
class OopStorageSetWeakParState {
|
||||
typedef OopStorage::ParState<concurrent, is_const> ParStateType;
|
||||
typedef ValueObjArray<ParStateType, OopStorageSet::weak_count> ParStateArray;
|
||||
|
||||
ParStateArray _par_states;
|
||||
|
||||
public:
|
||||
OopStorageSetWeakParState();
|
||||
|
||||
template <typename ClosureType>
|
||||
void oops_do(ClosureType* cl);
|
||||
|
||||
ParStateType* par_state(int i) const { return _par_states.at(i); }
|
||||
int par_state_count() const { return _par_states.count(); }
|
||||
|
||||
void report_num_dead();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_SHARED_OOPSTORAGESETPARSTATE_HPP
|
||||
|
@ -28,6 +28,9 @@
|
||||
#include "gc/shared/oopStorageParState.inline.hpp"
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "gc/shared/oopStorageSetParState.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
OopStorageSetStrongParState<concurrent, is_const>::OopStorageSetStrongParState() :
|
||||
@ -35,11 +38,65 @@ OopStorageSetStrongParState<concurrent, is_const>::OopStorageSetStrongParState()
|
||||
}
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
template <typename Closure>
|
||||
void OopStorageSetStrongParState<concurrent, is_const>::oops_do(Closure* cl) {
|
||||
template <typename ClosureType>
|
||||
void OopStorageSetStrongParState<concurrent, is_const>::oops_do(ClosureType* cl) {
|
||||
for (int i = 0; i < _par_states.count(); i++) {
|
||||
_par_states.at(i)->oops_do(cl);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
OopStorageSetWeakParState<concurrent, is_const>::OopStorageSetWeakParState() :
|
||||
_par_states(OopStorageSet::weak_iterator()) {
|
||||
}
|
||||
|
||||
template <typename ClosureType>
|
||||
class DeadCounterClosure : public OopClosure {
|
||||
private:
|
||||
ClosureType* const _cl;
|
||||
size_t _num_dead;
|
||||
|
||||
public:
|
||||
DeadCounterClosure(ClosureType* cl) :
|
||||
_cl(cl),
|
||||
_num_dead(0) {}
|
||||
|
||||
virtual void do_oop(oop* p) {
|
||||
_cl->do_oop(p);
|
||||
if (Atomic::load(p) == NULL) {
|
||||
_num_dead++; // Count both already NULL and cleared by closure.
|
||||
}
|
||||
}
|
||||
|
||||
virtual void do_oop(narrowOop* p) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
size_t num_dead() const {
|
||||
return _num_dead;
|
||||
}
|
||||
};
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
template <typename ClosureType>
|
||||
void OopStorageSetWeakParState<concurrent, is_const>::oops_do(ClosureType* cl) {
|
||||
for (int i = 0; i < _par_states.count(); i++) {
|
||||
ParStateType* state = _par_states.at(i);
|
||||
if (state->storage()->should_report_num_dead()) {
|
||||
DeadCounterClosure<ClosureType> counting_cl(cl);
|
||||
state->oops_do(&counting_cl);
|
||||
state->increment_num_dead(counting_cl.num_dead());
|
||||
} else {
|
||||
state->oops_do(cl);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
void OopStorageSetWeakParState<concurrent, is_const>::report_num_dead() {
|
||||
for (int i = 0; i < _par_states.count(); i++) {
|
||||
ParStateType* state = _par_states.at(i);
|
||||
state->storage()->report_num_dead(state->num_dead());
|
||||
}
|
||||
}
|
||||
#endif // SHARE_GC_SHARED_OOPSTORAGESETPARSTATE_INLINE_HPP
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "gc/shared/oopStorageParState.inline.hpp"
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "gc/shared/weakProcessor.inline.hpp"
|
||||
#include "gc/shared/oopStorageSetParState.inline.hpp"
|
||||
#include "gc/shared/weakProcessorPhases.hpp"
|
||||
#include "gc/shared/weakProcessorPhaseTimes.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
@ -36,32 +37,23 @@
|
||||
#include "runtime/globals.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
template <typename Container>
|
||||
class OopsDoAndReportCounts {
|
||||
public:
|
||||
void operator()(BoolObjectClosure* is_alive, OopClosure* keep_alive, OopStorage* storage) {
|
||||
Container::reset_dead_counter();
|
||||
|
||||
CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
|
||||
storage->oops_do(&cl);
|
||||
|
||||
Container::inc_dead_counter(cl.num_dead() + cl.num_skipped());
|
||||
Container::finish_dead_counter();
|
||||
void WeakProcessor::do_serial_parts(BoolObjectClosure* is_alive,
|
||||
OopClosure* keep_alive) {
|
||||
WeakProcessorPhases::Iterator it = WeakProcessorPhases::serial_iterator();
|
||||
for ( ; !it.is_end(); ++it) {
|
||||
WeakProcessorPhases::processor(*it)(is_alive, keep_alive);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
|
||||
WeakProcessorPhases::Iterator pit = WeakProcessorPhases::serial_iterator();
|
||||
for ( ; !pit.is_end(); ++pit) {
|
||||
WeakProcessorPhases::processor(*pit)(is_alive, keep_alive);
|
||||
}
|
||||
do_serial_parts(is_alive, keep_alive);
|
||||
|
||||
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
|
||||
for ( ; !it.is_end(); ++it) {
|
||||
if (OopStorageSet::string_table_weak() == *it) {
|
||||
OopsDoAndReportCounts<StringTable>()(is_alive, keep_alive, *it);
|
||||
} else if (OopStorageSet::resolved_method_table_weak() == *it) {
|
||||
OopsDoAndReportCounts<ResolvedMethodTable>()(is_alive, keep_alive, *it);
|
||||
if (it->should_report_num_dead()) {
|
||||
CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
|
||||
it->oops_do(&cl);
|
||||
it->report_num_dead(cl.num_skipped() + cl.num_dead());
|
||||
} else {
|
||||
it->weak_oops_do(is_alive, keep_alive);
|
||||
}
|
||||
@ -70,7 +62,12 @@ void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_a
|
||||
|
||||
void WeakProcessor::oops_do(OopClosure* closure) {
|
||||
AlwaysTrueClosure always_true;
|
||||
weak_oops_do(&always_true, closure);
|
||||
do_serial_parts(&always_true, closure);
|
||||
|
||||
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
|
||||
for ( ; !it.is_end(); ++it) {
|
||||
it->weak_oops_do(closure);
|
||||
}
|
||||
}
|
||||
|
||||
uint WeakProcessor::ergo_workers(uint max_workers) {
|
||||
@ -109,26 +106,13 @@ void WeakProcessor::Task::initialize() {
|
||||
if (_phase_times) {
|
||||
_phase_times->set_active_workers(_nworkers);
|
||||
}
|
||||
|
||||
uint storage_count = WeakProcessorPhases::oopstorage_phase_count;
|
||||
_storage_states = NEW_C_HEAP_ARRAY(StorageState, storage_count, mtGC);
|
||||
|
||||
StorageState* cur_state = _storage_states;
|
||||
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
|
||||
for ( ; !it.is_end(); ++it, ++cur_state) {
|
||||
assert(pointer_delta(cur_state, _storage_states, sizeof(StorageState)) < storage_count, "invariant");
|
||||
new (cur_state) StorageState(*it, _nworkers);
|
||||
}
|
||||
assert(pointer_delta(cur_state, _storage_states, sizeof(StorageState)) == storage_count, "invariant");
|
||||
StringTable::reset_dead_counter();
|
||||
ResolvedMethodTable::reset_dead_counter();
|
||||
}
|
||||
|
||||
WeakProcessor::Task::Task(uint nworkers) :
|
||||
_phase_times(NULL),
|
||||
_nworkers(nworkers),
|
||||
_serial_phases_done(WeakProcessorPhases::serial_phase_count),
|
||||
_storage_states(NULL)
|
||||
_storage_states()
|
||||
{
|
||||
initialize();
|
||||
}
|
||||
@ -137,22 +121,16 @@ WeakProcessor::Task::Task(WeakProcessorPhaseTimes* phase_times, uint nworkers) :
|
||||
_phase_times(phase_times),
|
||||
_nworkers(nworkers),
|
||||
_serial_phases_done(WeakProcessorPhases::serial_phase_count),
|
||||
_storage_states(NULL)
|
||||
_storage_states()
|
||||
{
|
||||
initialize();
|
||||
}
|
||||
|
||||
WeakProcessor::Task::~Task() {
|
||||
if (_storage_states != NULL) {
|
||||
StorageState* states = _storage_states;
|
||||
for (uint i = 0; i < WeakProcessorPhases::oopstorage_phase_count; ++i) {
|
||||
states->StorageState::~StorageState();
|
||||
++states;
|
||||
}
|
||||
FREE_C_HEAP_ARRAY(StorageState, _storage_states);
|
||||
void WeakProcessor::Task::report_num_dead() {
|
||||
for (int i = 0; i < _storage_states.par_state_count(); ++i) {
|
||||
StorageState* state = _storage_states.par_state(i);
|
||||
state->report_num_dead();
|
||||
}
|
||||
StringTable::finish_dead_counter();
|
||||
ResolvedMethodTable::finish_dead_counter();
|
||||
}
|
||||
|
||||
void WeakProcessor::GangTask::work(uint worker_id) {
|
||||
|
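The parallel path in weakProcessor.* follows the same protocol: WeakProcessor::Task now holds an OopStorageSetWeakParState instead of a hand-built StorageState array, each worker adds the entries it skipped or cleared to the matching per-storage state, and after the gang task completes a single report_num_dead() fans the totals out to whichever storages registered a callback. Roughly (a restatement of the surrounding hunks, not new API):

// After workers->run_task(&task, nworkers) in weak_oops_do(WorkGang*, ...),
// task.report_num_dead() reduces to walking the set-level par-state once:
void report_all_storages(OopStorageSetWeakParState<false, false>& storage_states) {
  for (int i = 0; i < storage_states.par_state_count(); ++i) {
    // Forwards the accumulated count to the owning storage; does nothing
    // for storages without a registered callback.
    storage_states.par_state(i)->report_num_dead();
  }
}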
@ -26,6 +26,7 @@
|
||||
#define SHARE_GC_SHARED_WEAKPROCESSOR_HPP
|
||||
|
||||
#include "gc/shared/oopStorageParState.hpp"
|
||||
#include "gc/shared/oopStorageSetParState.hpp"
|
||||
#include "gc/shared/workgroup.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
@ -71,6 +72,8 @@ public:
|
||||
|
||||
private:
|
||||
class GangTask;
|
||||
|
||||
static void do_serial_parts(BoolObjectClosure* is_alive, OopClosure* keep_alive);
|
||||
};
|
||||
|
||||
class WeakProcessor::Task {
|
||||
@ -79,17 +82,18 @@ class WeakProcessor::Task {
|
||||
WeakProcessorPhaseTimes* _phase_times;
|
||||
uint _nworkers;
|
||||
SubTasksDone _serial_phases_done;
|
||||
StorageState* _storage_states;
|
||||
OopStorageSetWeakParState<false, false> _storage_states;
|
||||
|
||||
void initialize();
|
||||
|
||||
public:
|
||||
Task(uint nworkers); // No time tracking.
|
||||
Task(WeakProcessorPhaseTimes* phase_times, uint nworkers);
|
||||
~Task();
|
||||
|
||||
template<typename IsAlive, typename KeepAlive>
|
||||
void work(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive);
|
||||
|
||||
void report_num_dead();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_SHARED_WEAKPROCESSOR_HPP
|
||||
|
@ -115,17 +115,12 @@ void WeakProcessor::Task::work(uint worker_id,
|
||||
CountingSkippedIsAliveClosure<IsAlive, KeepAlive> cl(is_alive, keep_alive);
|
||||
WeakProcessorPhaseTimeTracker pt(_phase_times, phase, worker_id);
|
||||
uint oopstorage_index = WeakProcessorPhases::oopstorage_index(phase);
|
||||
StorageState& cur_state = _storage_states[oopstorage_index];
|
||||
cur_state.oops_do(&cl);
|
||||
StorageState* cur_state = _storage_states.par_state(oopstorage_index);
|
||||
cur_state->oops_do(&cl);
|
||||
cur_state->increment_num_dead(cl.num_skipped() + cl.num_dead());
|
||||
if (_phase_times != NULL) {
|
||||
_phase_times->record_worker_items(worker_id, phase, cl.num_dead(), cl.num_total());
|
||||
}
|
||||
const OopStorage* cur_storage = cur_state.storage();
|
||||
if (cur_storage == OopStorageSet::string_table_weak()) {
|
||||
StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
|
||||
} else if (cur_storage == OopStorageSet::resolved_method_table_weak()) {
|
||||
ResolvedMethodTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
|
||||
}
|
||||
}
|
||||
|
||||
_serial_phases_done.all_tasks_completed(_nworkers);
|
||||
@ -159,6 +154,7 @@ public:
|
||||
{}
|
||||
|
||||
virtual void work(uint worker_id);
|
||||
void report_num_dead() { _task.report_num_dead(); }
|
||||
};
|
||||
|
||||
template<typename IsAlive, typename KeepAlive>
|
||||
@ -173,6 +169,7 @@ void WeakProcessor::weak_oops_do(WorkGang* workers,
|
||||
|
||||
GangTask task("Weak Processor", is_alive, keep_alive, phase_times, nworkers);
|
||||
workers->run_task(&task, nworkers);
|
||||
task.report_num_dead();
|
||||
}
|
||||
|
||||
template<typename IsAlive, typename KeepAlive>
|
||||
|
@ -94,6 +94,18 @@ public:
|
||||
inline void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
|
||||
class ShenandoahCleanUpdateWeakOopsClosure : public OopClosure {
|
||||
private:
|
||||
IsAlive* _is_alive;
|
||||
KeepAlive* _keep_alive;
|
||||
|
||||
public:
|
||||
inline ShenandoahCleanUpdateWeakOopsClosure(IsAlive* is_alive, KeepAlive* keep_alive);
|
||||
inline void do_oop(oop* p);
|
||||
inline void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
class ShenandoahCodeBlobAndDisarmClosure: public CodeBlobToOopClosure {
|
||||
private:
|
||||
BarrierSetNMethod* const _bs;
|
||||
|
@ -144,6 +144,35 @@ void ShenandoahEvacUpdateOopStorageRootsClosure::do_oop(narrowOop* p) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
|
||||
ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::ShenandoahCleanUpdateWeakOopsClosure(IsAlive* is_alive, KeepAlive* keep_alive) :
|
||||
_is_alive(is_alive), _keep_alive(keep_alive) {
|
||||
if (!CONCURRENT) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
|
||||
}
|
||||
}
|
||||
|
||||
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
|
||||
void ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::do_oop(oop* p) {
|
||||
oop obj = RawAccess<>::oop_load(p);
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
if (_is_alive->do_object_b(obj)) {
|
||||
_keep_alive->do_oop(p);
|
||||
} else {
|
||||
if (CONCURRENT) {
|
||||
Atomic::cmpxchg(p, obj, oop());
|
||||
} else {
|
||||
RawAccess<IS_NOT_NULL>::oop_store(p, oop());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
|
||||
void ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::do_oop(narrowOop* p) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
ShenandoahCodeBlobAndDisarmClosure::ShenandoahCodeBlobAndDisarmClosure(OopClosure* cl) :
|
||||
CodeBlobToOopClosure(cl, true /* fix_relocations */),
|
||||
_bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
|
||||
|
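ShenandoahCleanUpdateWeakOopsClosure above merges the former per-table closures: live referents go to the keep_alive closure, dead ones are cleared in place. The detail worth noting is the clear itself: in a concurrent phase the slot can be raced by OopStorage::release() or by another worker, so the closure only clears via a compare-and-swap that is allowed to lose, while at a safepoint a plain store is sufficient. Schematically (a condensed restatement of the branch in the closure body):

// Clearing a dead weak slot p that currently holds obj.
if (CONCURRENT) {
  Atomic::cmpxchg(p, obj, oop());                 // clear only if *p is still obj
} else {
  RawAccess<IS_NOT_NULL>::oop_store(p, oop());    // at a safepoint: no race to lose
}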
@ -1712,23 +1712,18 @@ private:
|
||||
ShenandoahMarkingContext* const _mark_context;
|
||||
bool _evac_in_progress;
|
||||
Thread* const _thread;
|
||||
size_t _dead_counter;
|
||||
|
||||
public:
|
||||
ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
|
||||
void do_oop(oop* p);
|
||||
void do_oop(narrowOop* p);
|
||||
|
||||
size_t dead_counter() const;
|
||||
void reset_dead_counter();
|
||||
};
|
||||
|
||||
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
|
||||
_heap(ShenandoahHeap::heap()),
|
||||
_mark_context(ShenandoahHeap::heap()->marking_context()),
|
||||
_evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
|
||||
_thread(Thread::current()),
|
||||
_dead_counter(0) {
|
||||
_thread(Thread::current()) {
|
||||
}
|
||||
|
||||
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
|
||||
@ -1736,10 +1731,7 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
if (!_mark_context->is_marked(obj)) {
|
||||
shenandoah_assert_correct(p, obj);
|
||||
oop old = Atomic::cmpxchg(p, obj, oop(NULL));
|
||||
if (obj == old) {
|
||||
_dead_counter ++;
|
||||
}
|
||||
Atomic::cmpxchg(p, obj, oop(NULL));
|
||||
} else if (_evac_in_progress && _heap->in_collection_set(obj)) {
|
||||
oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
|
||||
if (resolved == obj) {
|
||||
@ -1757,14 +1749,6 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const {
|
||||
return _dead_counter;
|
||||
}
|
||||
|
||||
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
|
||||
_dead_counter = 0;
|
||||
}
|
||||
|
||||
class ShenandoahIsCLDAliveClosure : public CLDClosure {
|
||||
public:
|
||||
void do_cld(ClassLoaderData* cld) {
|
||||
@ -1783,31 +1767,23 @@ public:
|
||||
// dead weak roots.
|
||||
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
|
||||
private:
|
||||
ShenandoahWeakRoot<true /*concurrent*/> _jni_roots;
|
||||
ShenandoahWeakRoot<true /*concurrent*/> _string_table_roots;
|
||||
ShenandoahWeakRoot<true /*concurrent*/> _resolved_method_table_roots;
|
||||
ShenandoahWeakRoot<true /*concurrent*/> _vm_roots;
|
||||
ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
|
||||
|
||||
// Roots related to concurrent class unloading
|
||||
ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/>
|
||||
_cld_roots;
|
||||
ShenandoahConcurrentNMethodIterator _nmethod_itr;
|
||||
ShenandoahConcurrentStringDedupRoots _dedup_roots;
|
||||
bool _concurrent_class_unloading;
|
||||
_cld_roots;
|
||||
ShenandoahConcurrentNMethodIterator _nmethod_itr;
|
||||
ShenandoahConcurrentStringDedupRoots _dedup_roots;
|
||||
bool _concurrent_class_unloading;
|
||||
|
||||
public:
|
||||
ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
|
||||
AbstractGangTask("Shenandoah Concurrent Weak Root Task"),
|
||||
_jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots),
|
||||
_string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
|
||||
_resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
|
||||
_vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots),
|
||||
_vm_roots(phase),
|
||||
_cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
|
||||
_nmethod_itr(ShenandoahCodeRoots::table()),
|
||||
_dedup_roots(phase),
|
||||
_concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
|
||||
StringTable::reset_dead_counter();
|
||||
ResolvedMethodTable::reset_dead_counter();
|
||||
if (_concurrent_class_unloading) {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
_nmethod_itr.nmethods_do_begin();
|
||||
@ -1815,12 +1791,12 @@ public:
|
||||
}
|
||||
|
||||
~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
|
||||
StringTable::finish_dead_counter();
|
||||
ResolvedMethodTable::finish_dead_counter();
|
||||
if (_concurrent_class_unloading) {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
_nmethod_itr.nmethods_do_end();
|
||||
}
|
||||
// Notify runtime data structures of potentially dead oops
|
||||
_vm_roots.report_num_dead();
|
||||
}
|
||||
|
||||
void work(uint worker_id) {
|
||||
@ -1830,17 +1806,8 @@ public:
|
||||
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
|
||||
// may race against OopStorage::release() calls.
|
||||
ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
|
||||
_jni_roots.oops_do(&cl, worker_id);
|
||||
_vm_roots.oops_do(&cl, worker_id);
|
||||
|
||||
cl.reset_dead_counter();
|
||||
_string_table_roots.oops_do(&cl, worker_id);
|
||||
StringTable::inc_dead_counter(cl.dead_counter());
|
||||
|
||||
cl.reset_dead_counter();
|
||||
_resolved_method_table_roots.oops_do(&cl, worker_id);
|
||||
ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
|
||||
|
||||
// String dedup weak roots
|
||||
ShenandoahForwardedIsAliveClosure is_alive;
|
||||
ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
|
||||
|
@ -38,17 +38,13 @@ class outputStream;
|
||||
f(CNT_PREFIX ## ThreadRoots, DESC_PREFIX "Thread Roots") \
|
||||
f(CNT_PREFIX ## CodeCacheRoots, DESC_PREFIX "Code Cache Roots") \
|
||||
f(CNT_PREFIX ## UniverseRoots, DESC_PREFIX "Universe Roots") \
|
||||
f(CNT_PREFIX ## JNIRoots, DESC_PREFIX "JNI Handles Roots") \
|
||||
f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \
|
||||
f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \
|
||||
f(CNT_PREFIX ## JNIWeakRoots, DESC_PREFIX "JNI Weak Roots") \
|
||||
f(CNT_PREFIX ## StringTableRoots, DESC_PREFIX "String Table Roots") \
|
||||
f(CNT_PREFIX ## ResolvedMethodTableRoots, DESC_PREFIX "Resolved Table Roots") \
|
||||
f(CNT_PREFIX ## VMGlobalRoots, DESC_PREFIX "VM Global Roots") \
|
||||
f(CNT_PREFIX ## VMStrongRoots, DESC_PREFIX "VM Strong Roots") \
|
||||
f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \
|
||||
f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \
|
||||
f(CNT_PREFIX ## ManagementRoots, DESC_PREFIX "Management Roots") \
|
||||
f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \
|
||||
f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \
|
||||
f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \
|
||||
f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \
|
||||
f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \
|
||||
f(CNT_PREFIX ## FinishQueues, DESC_PREFIX "Finish Queues") \
|
||||
|
@ -26,7 +26,7 @@
|
||||
#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
|
||||
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc/shared/oopStorageParState.hpp"
|
||||
#include "gc/shared/oopStorageSetParState.hpp"
|
||||
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeap.hpp"
|
||||
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
|
||||
@ -102,78 +102,28 @@ public:
|
||||
};
|
||||
|
||||
template <bool CONCURRENT>
|
||||
class ShenandoahVMRoot {
|
||||
class ShenandoahVMWeakRoots {
|
||||
private:
|
||||
OopStorage::ParState<CONCURRENT, false /* is_const */> _itr;
|
||||
const ShenandoahPhaseTimings::Phase _phase;
|
||||
const ShenandoahPhaseTimings::ParPhase _par_phase;
|
||||
public:
|
||||
ShenandoahVMRoot(OopStorage* storage,
|
||||
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase);
|
||||
|
||||
template <typename Closure>
|
||||
void oops_do(Closure* cl, uint worker_id);
|
||||
};
|
||||
|
||||
template <bool CONCURRENT>
|
||||
class ShenandoahWeakRoot : public ShenandoahVMRoot<CONCURRENT> {
|
||||
public:
|
||||
ShenandoahWeakRoot(OopStorage* storage,
|
||||
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase);
|
||||
};
|
||||
|
||||
template <>
|
||||
class ShenandoahWeakRoot<false /*concurrent*/> {
|
||||
private:
|
||||
OopStorage::ParState<false /*concurrent*/, false /*is_const*/> _itr;
|
||||
const ShenandoahPhaseTimings::Phase _phase;
|
||||
const ShenandoahPhaseTimings::ParPhase _par_phase;
|
||||
OopStorageSetWeakParState<CONCURRENT, false /* is_const */> _weak_roots;
|
||||
ShenandoahPhaseTimings::Phase _phase;
|
||||
|
||||
public:
|
||||
ShenandoahWeakRoot(OopStorage* storage,
|
||||
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase);
|
||||
ShenandoahVMWeakRoots(ShenandoahPhaseTimings::Phase phase);
|
||||
|
||||
template <typename IsAliveClosure, typename KeepAliveClosure>
|
||||
void weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id);
|
||||
};
|
||||
template <typename T>
|
||||
void oops_do(T* cl, uint worker_id);
|
||||
|
||||
template <bool CONCURRENT>
|
||||
class ShenandoahWeakRoots {
|
||||
private:
|
||||
ShenandoahWeakRoot<CONCURRENT> _jni_roots;
|
||||
ShenandoahWeakRoot<CONCURRENT> _string_table_roots;
|
||||
ShenandoahWeakRoot<CONCURRENT> _resolved_method_table_roots;
|
||||
ShenandoahWeakRoot<CONCURRENT> _vm_roots;
|
||||
template <typename IsAlive, typename KeepAlive>
|
||||
void weak_oops_do(IsAlive* is_alive, KeepAlive* keep_alive, uint worker_id);
|
||||
|
||||
public:
|
||||
ShenandoahWeakRoots();
|
||||
|
||||
template <typename Closure>
|
||||
void oops_do(Closure* cl, uint worker_id);
|
||||
};
|
||||
|
||||
template <>
|
||||
class ShenandoahWeakRoots<false /*concurrent */> {
|
||||
private:
|
||||
ShenandoahWeakRoot<false /*concurrent*/> _jni_roots;
|
||||
ShenandoahWeakRoot<false /*concurrent*/> _string_table_roots;
|
||||
ShenandoahWeakRoot<false /*concurrent*/> _resolved_method_table_roots;
|
||||
ShenandoahWeakRoot<false /*concurrent*/> _vm_roots;
|
||||
public:
|
||||
ShenandoahWeakRoots(ShenandoahPhaseTimings::Phase phase);
|
||||
|
||||
template <typename Closure>
|
||||
void oops_do(Closure* cl, uint worker_id);
|
||||
|
||||
template <typename IsAliveClosure, typename KeepAliveClosure>
|
||||
void weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id);
|
||||
void report_num_dead();
|
||||
};
|
||||
|
||||
template <bool CONCURRENT>
|
||||
class ShenandoahVMRoots {
|
||||
private:
|
||||
ShenandoahVMRoot<CONCURRENT> _jni_handle_roots;
|
||||
ShenandoahVMRoot<CONCURRENT> _vm_global_roots;
|
||||
OopStorageSetStrongParState<CONCURRENT, false /* is_const */> _strong_roots;
|
||||
ShenandoahPhaseTimings::Phase _phase;
|
||||
|
||||
public:
|
||||
ShenandoahVMRoots(ShenandoahPhaseTimings::Phase phase);
|
||||
@ -305,7 +255,7 @@ private:
|
||||
ShenandoahClassLoaderDataRoots<false /*concurrent*/, true /*single threaded*/>
|
||||
_cld_roots;
|
||||
ShenandoahSerialWeakRoots _serial_weak_roots;
|
||||
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahConcurrentStringDedupRoots _dedup_roots;
|
||||
ShenandoahCodeCacheRoots _code_roots;
|
||||
|
||||
@ -324,7 +274,7 @@ private:
|
||||
_cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahSerialWeakRoots _serial_weak_roots;
|
||||
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahStringDedupRoots _dedup_roots;
|
||||
ShenandoahCodeCacheRoots _code_roots;
|
||||
bool _stw_roots_processing;
|
||||
@ -345,7 +295,7 @@ private:
|
||||
_cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahSerialWeakRoots _serial_weak_roots;
|
||||
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahStringDedupRoots _dedup_roots;
|
||||
ShenandoahCodeCacheRoots _code_roots;
|
||||
|
||||
@ -365,7 +315,7 @@ private:
|
||||
_cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahSerialWeakRoots _serial_weak_roots;
|
||||
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
|
||||
ShenandoahStringDedupRoots _dedup_roots;
|
||||
ShenandoahCodeCacheRoots _code_roots;
|
||||
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include "classfile/classLoaderDataGraph.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc/shared/oopStorageParState.inline.hpp"
|
||||
#include "gc/shared/oopStorageSetParState.inline.hpp"
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
|
||||
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
|
||||
@ -42,84 +42,40 @@
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
template <bool CONCURRENT>
|
||||
inline ShenandoahVMRoot<CONCURRENT>::ShenandoahVMRoot(OopStorage* storage,
|
||||
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
|
||||
_itr(storage), _phase(phase), _par_phase(par_phase) {
|
||||
ShenandoahVMWeakRoots<CONCURRENT>::ShenandoahVMWeakRoots(ShenandoahPhaseTimings::Phase phase) :
|
||||
_phase(phase) {
|
||||
}
|
||||
|
||||
template <bool CONCURRENT>
|
||||
template <typename Closure>
|
||||
inline void ShenandoahVMRoot<CONCURRENT>::oops_do(Closure* cl, uint worker_id) {
|
||||
ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id);
|
||||
_itr.oops_do(cl);
|
||||
template <typename T>
|
||||
void ShenandoahVMWeakRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
|
||||
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::VMWeakRoots, worker_id);
|
||||
_weak_roots.oops_do(cl);
|
||||
}
|
||||
|
||||
template <bool CONCURRENT>
|
||||
inline ShenandoahWeakRoot<CONCURRENT>::ShenandoahWeakRoot(OopStorage* storage,
|
||||
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
ShenandoahVMRoot<CONCURRENT>(storage, phase, par_phase) {
}

inline ShenandoahWeakRoot<false>::ShenandoahWeakRoot(OopStorage* storage,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
_itr(storage), _phase(phase), _par_phase(par_phase) {
}

template <typename IsAliveClosure, typename KeepAliveClosure>
void ShenandoahWeakRoot<false /* concurrent */>::weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id) {
ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id);
_itr.weak_oops_do(is_alive, keep_alive);
template <typename IsAlive, typename KeepAlive>
void ShenandoahVMWeakRoots<CONCURRENT>::weak_oops_do(IsAlive* is_alive, KeepAlive* keep_alive, uint worker_id) {
ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive> cl(is_alive, keep_alive);
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::VMWeakRoots, worker_id);
_weak_roots.oops_do(&cl);
}

template <bool CONCURRENT>
ShenandoahWeakRoots<CONCURRENT>::ShenandoahWeakRoots() :
_jni_roots(OopStorageSet::jni_weak(), ShenandoahPhaseTimings::JNIWeakRoots),
_string_table_roots(OopStorageSet::string_table_weak(), ShenandoahPhaseTimings::StringTableRoots),
_resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), ShenandoahPhaseTimings::ResolvedMethodTableRoots),
_vm_roots(OopStorageSet::vm_weak(), ShenandoahPhaseTimings::VMWeakRoots) {
}

template <bool CONCURRENT>
template <typename Closure>
void ShenandoahWeakRoots<CONCURRENT>::oops_do(Closure* cl, uint worker_id) {
_jni_roots.oops_do(cl, worker_id);
_string_table_roots.oops_do(cl, worker_id);
_resolved_method_table_roots.oops_do(cl, worker_id);
_vm_roots.oops_do(cl, worker_id);
}

inline ShenandoahWeakRoots<false /* concurrent */>::ShenandoahWeakRoots(ShenandoahPhaseTimings::Phase phase) :
_jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots),
_string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
_resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
_vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots) {
}

template <typename IsAliveClosure, typename KeepAliveClosure>
void ShenandoahWeakRoots<false /* concurrent*/>::weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id) {
_jni_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_string_table_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_resolved_method_table_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_vm_roots.weak_oops_do(is_alive, keep_alive, worker_id);
}

template <typename Closure>
void ShenandoahWeakRoots<false /* concurrent */>::oops_do(Closure* cl, uint worker_id) {
AlwaysTrueClosure always_true;
weak_oops_do<AlwaysTrueClosure, Closure>(&always_true, cl, worker_id);
void ShenandoahVMWeakRoots<CONCURRENT>::report_num_dead() {
_weak_roots.report_num_dead();
}

template <bool CONCURRENT>
ShenandoahVMRoots<CONCURRENT>::ShenandoahVMRoots(ShenandoahPhaseTimings::Phase phase) :
_jni_handle_roots(OopStorageSet::jni_global(), phase, ShenandoahPhaseTimings::JNIRoots),
_vm_global_roots(OopStorageSet::vm_global(), phase, ShenandoahPhaseTimings::VMGlobalRoots) {
_phase(phase) {
}

template <bool CONCURRENT>
template <typename T>
void ShenandoahVMRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
_jni_handle_roots.oops_do(cl, worker_id);
_vm_global_roots.oops_do(cl, worker_id);
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::VMStrongRoots, worker_id);
_strong_roots.oops_do(cl);
}

template <bool CONCURRENT, bool SINGLE_THREADED>
@ -248,7 +204,7 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv

// Process light-weight/limited parallel roots then
_vm_roots.oops_do(keep_alive, worker_id);
_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_weak_roots.weak_oops_do<IsAlive, KeepAlive>(is_alive, keep_alive, worker_id);
_dedup_roots.oops_do(is_alive, keep_alive, worker_id);
_cld_roots.cld_do(&clds, worker_id);

@ -84,7 +84,7 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
if (verify(JNIHandleRoots)) {
shenandoah_assert_safepoint();
JNIHandles::oops_do(oops);
OopStorageSet::vm_global()->oops_do(oops);
Universe::vm_global()->oops_do(oops);
}

if (verify(WeakRoots)) {
@ -125,7 +125,7 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
Management::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
OopStorageSet::vm_global()->oops_do(oops);
Universe::vm_global()->oops_do(oops);

AlwaysTrueClosure always_true;
WeakProcessor::weak_oops_do(&always_true, oops);
@ -152,7 +152,7 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
Management::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
OopStorageSet::vm_global()->oops_do(oops);
Universe::vm_global()->oops_do(oops);

// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
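Reader's note: in the Shenandoah hunks above, the hand-written list of weak storages is replaced by a single ShenandoahVMWeakRoots holder that walks whatever the weak OopStorageSet contains through one clean/update closure and reports the dead count afterwards. Below is a minimal standalone sketch of that kind of cleaning closure; it is illustrative only (Obj, CleanUpdateWeakOops and the forwardee field are simplified stand-ins, not the HotSpot types).

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative sketch only: simplified stand-ins, not HotSpot code.
// An "object" is just a live flag plus a possibly updated (forwarded) address.
struct Obj { bool live; Obj* forwardee; };

// Sketch of a clean-and-update weak-oops closure: dead referents are cleared,
// live ones are updated to their new location, and cleared slots are counted
// so the owner can later be told how many entries died.
class CleanUpdateWeakOops {
 public:
  void do_oop(Obj** p) {
    Obj* obj = *p;
    if (obj == nullptr) return;
    if (!obj->live) {
      *p = nullptr;            // clear the dead weak root
      ++_num_dead;
    } else if (obj->forwardee != nullptr) {
      *p = obj->forwardee;     // keep-alive / update step
    }
  }
  size_t num_dead() const { return _num_dead; }
 private:
  size_t _num_dead = 0;
};

int main() {
  Obj live{true, nullptr}, moved{true, &live}, dead{false, nullptr};
  std::vector<Obj*> weak_roots = {&live, &moved, &dead, nullptr};

  CleanUpdateWeakOops cl;
  for (Obj*& slot : weak_roots) cl.do_oop(&slot);

  // This count is what a report_num_dead()-style call would hand back to the
  // storage owner in the real code.
  std::printf("cleared %zu dead weak roots\n", cl.num_dead());
  return 0;
}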
@ -79,10 +79,7 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Ro
static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");

static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsVMWeakHandles("Concurrent Weak Roots VMWeakHandles");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsJNIWeakHandles("Concurrent Weak Roots JNIWeakHandles");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsStringTable("Concurrent Weak Roots StringTable");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsResolvedMethodTable("Concurrent Weak Roots ResolvedMethodTable");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet");

template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
ZSerialOopsDo<T, F>::ZSerialOopsDo(T* iter) :
@ -341,76 +338,20 @@ void ZWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
}

ZConcurrentWeakRootsIterator::ZConcurrentWeakRootsIterator() :
_vm_weak_handles_iter(OopStorageSet::vm_weak()),
_jni_weak_handles_iter(OopStorageSet::jni_weak()),
_string_table_iter(OopStorageSet::string_table_weak()),
_resolved_method_table_iter(OopStorageSet::resolved_method_table_weak()),
_vm_weak_handles(this),
_jni_weak_handles(this),
_string_table(this),
_resolved_method_table(this) {
StringTable::reset_dead_counter();
ResolvedMethodTable::reset_dead_counter();
_oop_storage_set_iter(),
_oop_storage_set(this) {
}

ZConcurrentWeakRootsIterator::~ZConcurrentWeakRootsIterator() {
StringTable::finish_dead_counter();
ResolvedMethodTable::finish_dead_counter();
void ZConcurrentWeakRootsIterator::report_num_dead() {
_oop_storage_set_iter.report_num_dead();
}

void ZConcurrentWeakRootsIterator::do_vm_weak_handles(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsVMWeakHandles);
_vm_weak_handles_iter.oops_do(cl);
}

void ZConcurrentWeakRootsIterator::do_jni_weak_handles(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsJNIWeakHandles);
_jni_weak_handles_iter.oops_do(cl);
}

template <class Container>
class ZDeadCounterClosure : public ZRootsIteratorClosure {
private:
ZRootsIteratorClosure* const _cl;
size_t _ndead;

public:
ZDeadCounterClosure(ZRootsIteratorClosure* cl) :
_cl(cl),
_ndead(0) {}

~ZDeadCounterClosure() {
Container::inc_dead_counter(_ndead);
}

virtual void do_oop(oop* p) {
_cl->do_oop(p);
if (*p == NULL) {
_ndead++;
}
}

virtual void do_oop(narrowOop* p) {
ShouldNotReachHere();
}
};

void ZConcurrentWeakRootsIterator::do_string_table(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsStringTable);
ZDeadCounterClosure<StringTable> counter_cl(cl);
_string_table_iter.oops_do(&counter_cl);
}

void ZConcurrentWeakRootsIterator::do_resolved_method_table(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsResolvedMethodTable);
ZDeadCounterClosure<ResolvedMethodTable> counter_cl(cl);
_resolved_method_table_iter.oops_do(&counter_cl);
void ZConcurrentWeakRootsIterator::do_oop_storage_set(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet);
_oop_storage_set_iter.oops_do(cl);
}

void ZConcurrentWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRoots);
_vm_weak_handles.oops_do(cl);
_jni_weak_handles.oops_do(cl);
_string_table.oops_do(cl);
_resolved_method_table.oops_do(cl);
_oop_storage_set.oops_do(cl);
}

@ -36,7 +36,8 @@
class ZRootsIteratorClosure;

typedef OopStorage::ParState<true /* concurrent */, false /* is_const */> ZOopStorageIterator;
typedef OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> ZOopStorageSetIterator;
typedef OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> ZOopStorageSetStrongIterator;
typedef OopStorageSetWeakParState<true /* concurrent */, false /* is_const */> ZOopStorageSetWeakIterator;

template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
class ZSerialOopsDo {
@ -134,8 +135,8 @@ public:

class ZConcurrentRootsIterator {
private:
ZOopStorageSetIterator _oop_storage_set_iter;
const int _cld_claim;
ZOopStorageSetStrongIterator _oop_storage_set_iter;
const int _cld_claim;

void do_oop_storage_set(ZRootsIteratorClosure* cl);
void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
@ -186,26 +187,18 @@ public:

class ZConcurrentWeakRootsIterator {
private:
ZOopStorageIterator _vm_weak_handles_iter;
ZOopStorageIterator _jni_weak_handles_iter;
ZOopStorageIterator _string_table_iter;
ZOopStorageIterator _resolved_method_table_iter;
ZOopStorageSetWeakIterator _oop_storage_set_iter;

void do_vm_weak_handles(ZRootsIteratorClosure* cl);
void do_jni_weak_handles(ZRootsIteratorClosure* cl);
void do_string_table(ZRootsIteratorClosure* cl);
void do_resolved_method_table(ZRootsIteratorClosure* cl);
void do_oop_storage_set(ZRootsIteratorClosure* cl);

ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_vm_weak_handles> _vm_weak_handles;
ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_string_table> _string_table;
ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_resolved_method_table> _resolved_method_table;
ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_oop_storage_set> _oop_storage_set;

public:
ZConcurrentWeakRootsIterator();
~ZConcurrentWeakRootsIterator();

void oops_do(ZRootsIteratorClosure* cl);

void report_num_dead();
};

#endif // SHARE_GC_Z_ZROOTSITERATOR_HPP
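Reader's note: with the per-table iterators and ZDeadCounterClosure gone, the ZGC concurrent weak-root walk claims entries from a single weak par-state over the whole OopStorageSet and reports the accumulated dead counts back to the owners in one report_num_dead() call. A standalone sketch of that claim-then-report shape follows; WeakStorage and WeakSetParState are simplified stand-ins invented for illustration, not the HotSpot classes.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

// Illustrative sketch only: simplified stand-ins, not HotSpot code.
// One weak storage: slots plus the num-dead callback its owner registered.
struct WeakStorage {
  const char* name;
  std::vector<const void*> slots;                 // null slot == cleared weak reference
  std::function<void(size_t)> num_dead_callback;
};

// Workers claim whole storages from a shared atomic cursor; dead counts are
// accumulated and reported once after the walk is finished.
class WeakSetParState {
 public:
  explicit WeakSetParState(std::vector<WeakStorage>* set)
      : _set(set), _claim(0), _dead(set->size()) {}

  // Called by each worker; safe to call from several threads.
  void oops_do() {
    for (size_t i = _claim.fetch_add(1); i < _set->size(); i = _claim.fetch_add(1)) {
      size_t dead = 0;
      for (const void* p : (*_set)[i].slots) {
        if (p == nullptr) ++dead;                 // a real closure would also update live slots
      }
      _dead[i] = dead;
    }
  }

  // Called once after all workers are done (the report_num_dead() step above).
  void report_num_dead() {
    for (size_t i = 0; i < _set->size(); ++i) {
      (*_set)[i].num_dead_callback(_dead[i]);
    }
  }

 private:
  std::vector<WeakStorage>* _set;
  std::atomic<size_t> _claim;
  std::vector<size_t> _dead;
};

int main() {
  int a = 0, b = 0;
  std::vector<WeakStorage> set = {
    {"StringTable Weak", {&a, nullptr, &b},
     [](size_t n) { std::printf("StringTable Weak dead: %zu\n", n); }},
    {"ResolvedMethodTable Weak", {nullptr, nullptr},
     [](size_t n) { std::printf("ResolvedMethodTable Weak dead: %zu\n", n); }},
  };
  WeakSetParState state(&set);
  state.oops_do();          // single-threaded here; workers would share the same state
  state.report_num_dead();  // each owner gets exactly one notification
  return 0;
}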
@ -62,6 +62,10 @@ public:
ZTask("ZProcessConccurentWeakRootsTask"),
_concurrent_weak_roots() {}

~ZProcessConcurrentWeakRootsTask() {
_concurrent_weak_roots.report_num_dead();
}

virtual void work() {
ZPhantomCleanOopClosure cl;
_concurrent_weak_roots.oops_do(&cl);

@ -151,7 +151,7 @@ bool ReferenceToRootClosure::do_oop_storage_roots() {
for (OopStorageSet::Iterator it = OopStorageSet::strong_iterator(); !it.is_end(); ++it, ++i) {
assert(!complete(), "invariant");
OopStorage* oop_storage = *it;
OldObjectRoot::Type type = oop_storage == OopStorageSet::jni_global() ?
OldObjectRoot::Type type = JNIHandles::is_global_storage(oop_storage) ?
OldObjectRoot::_global_jni_handle :
OldObjectRoot::_global_oop_handle;
OldObjectRoot::System system = OldObjectRoot::System(OldObjectRoot::_strong_oop_storage_set_first + i);

@ -26,7 +26,6 @@
#include "classfile/symbolTable.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jvmci/jniAccessMark.inline.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciRuntime.hpp"
@ -720,7 +719,7 @@ JVMCIRuntime::JVMCIRuntime(int id) {

// Handles to objects in the Hotspot heap.
static OopStorage* object_handles() {
return OopStorageSet::vm_global();
return Universe::vm_global();
}

jobject JVMCIRuntime::make_global(const Handle& obj) {

@ -39,6 +39,7 @@
#include "gc/shared/gcConfig.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@ -160,6 +161,9 @@ bool Universe::_fully_initialized = false;
size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

OopStorage* Universe::_vm_weak = NULL;
OopStorage* Universe::_vm_global = NULL;

CollectedHeap* Universe::_collectedHeap = NULL;

void Universe::basic_type_classes_do(void f(Klass*)) {
@ -789,6 +793,23 @@ void Universe::update_heap_info_at_gc() {
_heap_used_at_last_gc = heap()->used();
}

OopStorage* Universe::vm_weak() {
return Universe::_vm_weak;
}

OopStorage* Universe::vm_global() {
return Universe::_vm_global;
}

void Universe::oopstorage_init() {
Universe::_vm_global = OopStorageSet::create_strong("VM Global");
Universe::_vm_weak = OopStorageSet::create_weak("VM Weak");
}

void universe_oopstorage_init() {
Universe::oopstorage_init();
}

void initialize_known_method(LatestMethodCache* method_cache,
InstanceKlass* ik,
const char* method,

@ -40,7 +40,7 @@

class CollectedHeap;
class DeferredObjAllocEvent;

class OopStorage;

// A helper class for caching a Method* when the user of the cache
// only cares about the latest version of the Method*. This cache safely
@ -184,6 +184,9 @@ class Universe: AllStatic {
static size_t _heap_capacity_at_last_gc;
static size_t _heap_used_at_last_gc;

static OopStorage* _vm_weak;
static OopStorage* _vm_global;

static jint initialize_heap();
static void initialize_tlab();
static void initialize_basic_type_mirrors(TRAPS);
@ -333,6 +336,11 @@ class Universe: AllStatic {
static size_t get_heap_used_at_last_gc() { return _heap_used_at_last_gc; }
static void update_heap_info_at_gc();

// Global OopStorages
static OopStorage* vm_weak();
static OopStorage* vm_global();
static void oopstorage_init();

// Testers
static bool is_bootstrapping() { return _bootstrapping; }
static bool is_module_initialized() { return _module_initialized; }
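Reader's note: the Universe hunks above show the ownership pattern this change introduces: the "VM Global" and "VM Weak" storages are created once during universe_oopstorage_init(), registered with the set for GC iteration, and reached by clients through Universe::vm_global() / Universe::vm_weak() instead of the central OopStorageSet accessors. Below is a minimal standalone sketch of that pattern; Storage, StorageSet and UniverseLike are illustrative stand-ins, not the HotSpot classes.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Illustrative sketch only: simplified stand-ins, not HotSpot code.
class Storage {
 public:
  explicit Storage(std::string name) : _name(std::move(name)) {}
  const std::string& name() const { return _name; }
 private:
  std::string _name;
};

// Stand-in for the create_strong/create_weak factories: the set keeps a
// registry for GC iteration, but the returned pointer is owned by the caller.
class StorageSet {
 public:
  static Storage* create_strong(const char* name) { return registered(name); }
  static Storage* create_weak(const char* name) { return registered(name); }
  static const std::vector<Storage*>& all() { return _all; }
 private:
  static Storage* registered(const char* name) {
    Storage* s = new Storage(name);  // leaked on purpose: storages live for the VM lifetime
    _all.push_back(s);
    return s;
  }
  static std::vector<Storage*> _all;
};
std::vector<Storage*> StorageSet::_all;

// Stand-in for Universe: the subsystem owns its storages and exposes accessors.
class UniverseLike {
 public:
  static void oopstorage_init() {
    _vm_global = StorageSet::create_strong("VM Global");
    _vm_weak = StorageSet::create_weak("VM Weak");
  }
  static Storage* vm_global() { return _vm_global; }
  static Storage* vm_weak() { return _vm_weak; }
 private:
  static Storage* _vm_global;
  static Storage* _vm_weak;
};
Storage* UniverseLike::_vm_global = nullptr;
Storage* UniverseLike::_vm_weak = nullptr;

int main() {
  UniverseLike::oopstorage_init();
  // Clients go through the owner; the GC still sees every storage via the set.
  assert(UniverseLike::vm_global()->name() == "VM Global");
  assert(StorageSet::all().size() == 2);
  return 0;
}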
@ -2829,7 +2829,7 @@ void JvmtiObjectAllocEventCollector::generate_call_for_allocated() {
oop obj = _allocated->at(i).resolve();
_post_callback(JavaThread::current(), obj);
// Release OopHandle
_allocated->at(i).release(OopStorageSet::vm_global());
_allocated->at(i).release(Universe::vm_global());

}
delete _allocated, _allocated = NULL;
@ -2841,7 +2841,7 @@ void JvmtiObjectAllocEventCollector::record_allocation(oop obj) {
if (_allocated == NULL) {
_allocated = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(1, mtServiceability);
}
_allocated->push(OopHandle(OopStorageSet::vm_global(), obj));
_allocated->push(OopHandle(Universe::vm_global(), obj));
}

// Disable collection of VMObjectAlloc events

@ -202,19 +202,19 @@ JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
assert(_method != NULL, "No method for breakpoint.");
assert(_bci >= 0, "Negative bci for breakpoint.");
oop class_holder_oop = _method->method_holder()->klass_holder();
_class_holder = OopHandle(OopStorageSet::vm_global(), class_holder_oop);
_class_holder = OopHandle(Universe::vm_global(), class_holder_oop);
}

JvmtiBreakpoint::~JvmtiBreakpoint() {
if (_class_holder.peek() != NULL) {
_class_holder.release(OopStorageSet::vm_global());
_class_holder.release(Universe::vm_global());
}
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
_method = bp._method;
_bci = bp._bci;
_class_holder = OopHandle(OopStorageSet::vm_global(), bp._class_holder.resolve());
_class_holder = OopHandle(Universe::vm_global(), bp._class_holder.resolve());
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {

@ -83,7 +83,7 @@ class ResolvedMethodTableConfig : public AllStatic {
return AllocateHeap(size, mtClass);
}
static void free_node(void* memory, Value const& value) {
value.release(OopStorageSet::resolved_method_table_weak());
value.release(ResolvedMethodTable::_oop_storage);
FreeHeap(memory);
ResolvedMethodTable::item_removed();
}
@ -93,14 +93,16 @@ static ResolvedMethodTableHash* _local_table = NULL;
static size_t _current_size = (size_t)1 << ResolvedMethodTableSizeLog;

volatile bool ResolvedMethodTable::_has_work = false;
OopStorage* ResolvedMethodTable::_oop_storage;

volatile size_t _items_count = 0;
volatile size_t _uncleaned_items_count = 0;

void ResolvedMethodTable::create_table() {
_local_table = new ResolvedMethodTableHash(ResolvedMethodTableSizeLog, END_SIZE, GROW_HINT);
log_trace(membername, table)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, ResolvedMethodTableSizeLog);
_oop_storage = OopStorageSet::create_weak("ResolvedMethodTable Weak");
_oop_storage->register_num_dead_callback(&gc_notification);
}

size_t ResolvedMethodTable::table_size() {
@ -193,7 +195,7 @@ oop ResolvedMethodTable::add_method(const Method* method, Handle rmethod_name) {
if (_local_table->get(thread, lookup, rmg)) {
return rmg.get_res_oop();
}
WeakHandle wh(OopStorageSet::resolved_method_table_weak(), rmethod_name);
WeakHandle wh(_oop_storage, rmethod_name);
// The hash table takes ownership of the WeakHandle, even if it's not inserted.
if (_local_table->insert(thread, lookup, wh)) {
log_insert(method);
@ -212,24 +214,26 @@ void ResolvedMethodTable::item_removed() {
}

double ResolvedMethodTable::get_load_factor() {
return (double)_items_count/_current_size;
return double(_items_count)/double(_current_size);
}

double ResolvedMethodTable::get_dead_factor() {
return (double)_uncleaned_items_count/_current_size;
double ResolvedMethodTable::get_dead_factor(size_t num_dead) {
return double(num_dead)/double(_current_size);
}

static const double PREF_AVG_LIST_LEN = 2.0;
// If we have as many dead items as 50% of the number of bucket
static const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;

void ResolvedMethodTable::check_concurrent_work() {
if (_has_work) {
void ResolvedMethodTable::gc_notification(size_t num_dead) {
log_trace(membername, table)("Uncleaned items:" SIZE_FORMAT, num_dead);

if (has_work()) {
return;
}

double load_factor = get_load_factor();
double dead_factor = get_dead_factor();
double dead_factor = get_dead_factor(num_dead);
// We should clean/resize if we have more dead than alive,
// more items than preferred load factor or
// more dead items than water mark.
@ -244,12 +248,15 @@ void ResolvedMethodTable::check_concurrent_work() {

void ResolvedMethodTable::trigger_concurrent_work() {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
_has_work = true;
Atomic::store(&_has_work, true);
Service_lock->notify_all();
}

bool ResolvedMethodTable::has_work() {
return Atomic::load_acquire(&_has_work);
}

void ResolvedMethodTable::do_concurrent_work(JavaThread* jt) {
_has_work = false;
double load_factor = get_load_factor();
log_debug(membername, table)("Concurrent work, live factor: %g", load_factor);
// We prefer growing, since that also removes dead items
@ -258,6 +265,7 @@ void ResolvedMethodTable::do_concurrent_work(JavaThread* jt) {
} else {
clean_dead_entries(jt);
}
Atomic::release_store(&_has_work, false);
}

void ResolvedMethodTable::grow(JavaThread* jt) {
@ -323,22 +331,6 @@ void ResolvedMethodTable::clean_dead_entries(JavaThread* jt) {
}
log_info(membername, table)("Cleaned %ld of %ld", stdc._count, stdc._item);
}
void ResolvedMethodTable::reset_dead_counter() {
_uncleaned_items_count = 0;
}

void ResolvedMethodTable::inc_dead_counter(size_t ndead) {
size_t total = Atomic::add(&_uncleaned_items_count, ndead);
log_trace(membername, table)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total);
}

// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
void ResolvedMethodTable::finish_dead_counter() {
check_concurrent_work();
}

#if INCLUDE_JVMTI
class AdjustMethodEntries : public StackObj {
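Reader's note: the resolvedMethodTable.cpp hunks above replace the old reset/inc/finish dead-counter protocol with a single num-dead callback: the table creates and owns its weak OopStorage, registers gc_notification with it, and the GC hands that callback one num_dead value after weak processing. A standalone sketch of the resulting decision logic follows, using the thresholds visible in the hunk (PREF_AVG_LIST_LEN, CLEAN_DEAD_HIGH_WATER_MARK); the table sizes and trigger function here are illustrative values, not HotSpot code.

#include <cstddef>
#include <cstdio>

// Illustrative sketch only: simplified globals stand in for the table state.
static const double PREF_AVG_LIST_LEN = 2.0;          // preferred live entries per bucket
static const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5; // dead entries per bucket before cleaning

static size_t items_count = 100000;    // illustrative live-entry count
static size_t table_size  = 32768;     // illustrative bucket count
static bool   has_work    = false;

static double load_factor()                { return double(items_count) / double(table_size); }
static double dead_factor(size_t num_dead) { return double(num_dead)    / double(table_size); }

// Stand-in for trigger_concurrent_work(): wake the service thread.
static void trigger_concurrent_work() { has_work = true; }

// Sketch of the num-dead callback: invoked by the GC after weak processing.
static void gc_notification(size_t num_dead) {
  if (has_work) {
    return;  // a cleaning/resize request is already pending
  }
  double lf = load_factor();
  double df = dead_factor(num_dead);
  // Clean or resize if there are more dead than live entries, the table is
  // over the preferred load, or the dead count passes the high-water mark.
  if (df > lf || lf > PREF_AVG_LIST_LEN || df > CLEAN_DEAD_HIGH_WATER_MARK) {
    trigger_concurrent_work();
  }
}

int main() {
  gc_notification(20000);  // dead factor ~0.61 -> schedules concurrent work
  std::printf("has_work = %s\n", has_work ? "true" : "false");
  return 0;
}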
@ -33,7 +33,20 @@ class ResolvedMethodTable;
class ResolvedMethodTableConfig;

class ResolvedMethodTable : public AllStatic {
static volatile bool _has_work;
friend class ResolvedMethodTableConfig;

static volatile bool _has_work;
static OopStorage* _oop_storage;

// Callback for GC to notify of changes that might require cleaning or resize.
static void gc_notification(size_t num_dead);
static void trigger_concurrent_work();

static double get_load_factor();
static double get_dead_factor(size_t num_dead);

static void grow(JavaThread* jt);
static void clean_dead_entries(JavaThread* jt);

public:
// Initialization
@ -50,30 +63,9 @@ public:
static void item_removed();

// Cleaning
static bool has_work() { return _has_work; }

// Cleaning and table management

static double get_load_factor();
static double get_dead_factor();

static void check_concurrent_work();
static void trigger_concurrent_work();
static bool has_work();
static void do_concurrent_work(JavaThread* jt);

static void grow(JavaThread* jt);
static void clean_dead_entries(JavaThread* jt);

// GC Notification

// Must be called before a parallel walk where objects might die.
static void reset_dead_counter();
// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
static void finish_dead_counter();
// If GC uses ParState directly it should add the number of cleared
// entries to this method.
static void inc_dead_counter(size_t ndead);

// JVMTI Support - It is called at safepoint only for RedefineClasses
JVMTI_ONLY(static void adjust_method_entries(bool * trace_name_printed);)

@ -50,7 +50,7 @@
void check_ThreadShadow();
void eventlog_init();
void mutex_init();
void oopstorage_init();
void universe_oopstorage_init();
void chunkpool_init();
void perfMemory_init();
void SuspendibleThreadSet_init();
@ -97,7 +97,7 @@ void vm_init_globals() {
basic_types_init();
eventlog_init();
mutex_init();
oopstorage_init();
universe_oopstorage_init();
chunkpool_init();
perfMemory_init();
SuspendibleThreadSet_init();

@ -37,12 +37,12 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static OopStorage* global_handles() {
return OopStorageSet::jni_global();
OopStorage* JNIHandles::global_handles() {
return _global_handles;
}

static OopStorage* weak_global_handles() {
return OopStorageSet::jni_weak();
OopStorage* JNIHandles::weak_global_handles() {
return _weak_global_handles;
}

// Serviceability agent support.
@ -50,11 +50,10 @@ OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;

void jni_handles_init() {
JNIHandles::_global_handles = global_handles();
JNIHandles::_weak_global_handles = weak_global_handles();
JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
}

jobject JNIHandles::make_local(oop obj) {
if (obj == NULL) {
return NULL; // ignore null handles
@ -201,6 +200,9 @@ void JNIHandles::weak_oops_do(OopClosure* f) {
weak_global_handles()->weak_oops_do(f);
}

bool JNIHandles::is_global_storage(const OopStorage* storage) {
return _global_handles == storage;
}

inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;

@ -42,6 +42,9 @@ class JNIHandles : AllStatic {
static OopStorage* _weak_global_handles;
friend void jni_handles_init();

static OopStorage* global_handles();
static OopStorage* weak_global_handles();

inline static bool is_jweak(jobject handle);
inline static oop* jobject_ptr(jobject handle); // NOT jweak!
inline static oop* jweak_ptr(jobject handle);
@ -124,6 +127,8 @@ class JNIHandles : AllStatic {
static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
// Traversal of weak global handles.
static void weak_oops_do(OopClosure* f);

static bool is_global_storage(const OopStorage* storage);
};
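Reader's note: once JNIHandles owns its storages, only the owner can say which member of the strong set is the JNI global storage, which is why the JFR leak-profiler hunk earlier switches from comparing against OopStorageSet::jni_global() to asking JNIHandles::is_global_storage(). A standalone sketch of that owner-side query (Storage and JniHandlesLike are simplified, invented stand-ins):

#include <cassert>

// Illustrative sketch only: simplified stand-ins, not HotSpot code.
struct Storage { const char* name; };

// Only the owner knows which storage in the strong set is the JNI global one.
class JniHandlesLike {
 public:
  static void init(Storage* global, Storage* weak_global) {
    _global = global;
    _weak_global = weak_global;
  }
  static bool is_global_storage(const Storage* s) { return s == _global; }
 private:
  static Storage* _global;
  static Storage* _weak_global;
};
Storage* JniHandlesLike::_global = nullptr;
Storage* JniHandlesLike::_weak_global = nullptr;

int main() {
  Storage jni_global{"JNI Global"}, vm_global{"VM Global"};
  JniHandlesLike::init(&jni_global, nullptr);
  // A leak-profiler-style walk over the strong set can tag JNI globals
  // differently from other global oop handles without knowing set layout.
  assert(JniHandlesLike::is_global_storage(&jni_global));
  assert(!JniHandlesLike::is_global_storage(&vm_global));
  return 0;
}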
@ -31,75 +31,65 @@
#include "utilities/macros.hpp"
#include "unittest.hpp"

// GTEST assertions may introduce ODR-uses. Dodge them.
template<typename T> static T no_odr(T x) { return x; }
class OopStorageSetTest : public ::testing::Test {
protected:
// Returns index of s in storages, or size if not found.
template <uint count>
static size_t find_storage(OopStorage* s, OopStorage* storages[count]) {
for (uint i = 0; i < count; ++i) {
if (s == storages[i]) {
return i;
}
}
return count;
}

static void fill_strong(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::strong_count));
STATIC_ASSERT(2 == OopStorageSet::strong_count);
storages[0] = OopStorageSet::jni_global();
storages[1] = OopStorageSet::vm_global();
}

static void fill_weak(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::weak_count));
STATIC_ASSERT(4 == OopStorageSet::weak_count);
storages[0] = OopStorageSet::jni_weak();
storages[1] = OopStorageSet::vm_weak();
storages[2] = OopStorageSet::string_table_weak();
storages[3] = OopStorageSet::resolved_method_table_weak();
}

static void fill_all(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::all_count));
const uint strong_count = OopStorageSet::strong_count;
fill_strong(storages, strong_count);
fill_weak(storages + strong_count, size - strong_count);
}

// Returns index of s in storages, or size if not found.
static size_t find_storage(OopStorage* s, OopStorage** storages, size_t size) {
for (uint i = 0; i < size; ++i) {
if (s == storages[i]) {
return i;
template <uint count>
static void check_iterator(OopStorageSet::Iterator it,
OopStorage* storages[count]) {
OopStorageSet::Iterator start = it;
ASSERT_EQ(start, it);
for ( ; !it.is_end(); ++it) {
size_t index = find_storage<count>(*it, storages);
ASSERT_LT(index, count);
storages[index] = NULL;
}
ASSERT_NE(start, it);
const OopStorage* null_storage = NULL;
for (uint i = 0; i < count; ++i) {
ASSERT_EQ(null_storage, storages[i]);
}
}
return size;
}

static void check_iterator(OopStorageSet::Iterator it,
OopStorage** storages,
size_t size) {
OopStorageSet::Iterator start = it;
ASSERT_EQ(start, it);
for ( ; !it.is_end(); ++it) {
size_t index = find_storage(*it, storages, size);
ASSERT_LT(index, size);
storages[index] = NULL;
}
ASSERT_NE(start, it);
const OopStorage* null_storage = NULL;
for (uint i = 0; i < size; ++i) {
ASSERT_EQ(null_storage, storages[i]);
}
}

static void test_iterator(uint count,
OopStorageSet::Iterator iterator,
void (*fill)(OopStorage**, size_t)) {
OopStorage** storages = NEW_C_HEAP_ARRAY(OopStorage*, count, mtGC);
fill(storages, count);
check_iterator(iterator, storages, count);
FREE_C_HEAP_ARRAY(OopStorage*, storages);
}

#define TEST_ITERATOR(kind) \
TEST_VM(OopStorageSetTest, PASTE_TOKENS(kind, _iterator)) { \
test_iterator(OopStorageSet::PASTE_TOKENS(kind, _count), \
OopStorageSet::PASTE_TOKENS(kind, _iterator)(), \
&PASTE_TOKENS(fill_, kind)); \
template <uint count>
static void test_iterator(OopStorageSet::Iterator iterator,
void (*fill)(OopStorage*[count])) {
OopStorage* storages[count];
fill(storages);
check_iterator<count>(iterator, storages);
}

TEST_ITERATOR(strong);
TEST_ITERATOR(weak)
TEST_ITERATOR(all)
static void test_strong_iterator() {
test_iterator<OopStorageSet::strong_count>(
OopStorageSet::strong_iterator(),
&OopStorageSet::fill_strong);

}
static void test_weak_iterator() {
test_iterator<OopStorageSet::weak_count>(
OopStorageSet::weak_iterator(),
&OopStorageSet::fill_weak);

}
static void test_all_iterator() {
test_iterator<OopStorageSet::all_count>(
OopStorageSet::all_iterator(),
&OopStorageSet::fill_all);
}
};

TEST_VM_F(OopStorageSetTest, iterators) {
test_strong_iterator();
test_weak_iterator();
test_all_iterator();
}
@ -0,0 +1,284 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @requires vm.gc != "Epsilon"
 * @summary Stress the string table and cleaning.
 * Test argument is the approximate number of seconds to run.
 * @library /test/lib
 * @modules java.base/jdk.internal.misc
 * @build sun.hotspot.WhiteBox
 * @run driver ClassFileInstaller sun.hotspot.WhiteBox
 * @run main/othervm
 *   -Xbootclasspath/a:.
 *   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
 *   runtime.stringtable.StringTableCleaningTest 30
 */

package runtime.stringtable;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import sun.hotspot.gc.GC;

public class StringTableCleaningTest {
public static void main(String[] args) throws Exception {
List<String> subargs = new ArrayList<String>();
subargs.addAll(List.of("-Xlog:gc,gc+start,stringtable*=trace", "-Xmx3G"));
subargs.add(Tester.class.getName());
subargs.addAll(Arrays.asList(args));
OutputAnalyzer output = ProcessTools.executeTestJvm(subargs);
output.shouldHaveExitValue(0);
checkOutput(output);
}

private static int fail(String msg) throws Exception {
throw new RuntimeException(msg);
}

// Recognizing GC start and end log lines.

private static final String gcPrefix = "\\[info\\s*\\]\\[gc";
private static final String gcMiddle = "\\s*\\] GC\\(\\p{Digit}+\\) ";

private static final String gcStartPrefix = gcPrefix + ",start" + gcMiddle;
private static final String gcEndPrefix = gcPrefix + gcMiddle;

// Suffix for SerialGC and ParallelGC.
private static final String spSuffix = "Pause";

// All G1 pauses except Cleanup do weak reference clearing.
private static final String g1Suffix = "Pause(?! Cleanup)";

// Suffix for ZGC.
private static final String zSuffix = "Garbage Collection";

// Suffix for Shenandoah.
private static final String shenSuffix = "Concurrent weak roots";

private static String getGcStartString() {
if (GC.Serial.isSelected() || GC.Parallel.isSelected()) {
return gcStartPrefix + spSuffix;
} else if (GC.G1.isSelected()) {
return gcStartPrefix + g1Suffix;
} else if (GC.Z.isSelected()) {
return gcStartPrefix + zSuffix;
} else if (GC.Shenandoah.isSelected()) {
return gcStartPrefix + shenSuffix;
} else {
return "unsupported GC";
}
}

private static String getGcEndString() {
if (GC.Serial.isSelected() || GC.Parallel.isSelected()) {
return gcEndPrefix + spSuffix;
} else if (GC.G1.isSelected()) {
return gcEndPrefix + g1Suffix;
} else if (GC.Z.isSelected()) {
return gcEndPrefix + zSuffix;
} else if (GC.Shenandoah.isSelected()) {
return gcEndPrefix + shenSuffix;
} else {
return "unsupported GC";
}
}

private static Pattern getGcStartPattern() {
return Pattern.compile(getGcStartString());
}

private static Pattern getGcEndPattern() {
return Pattern.compile(getGcEndString());
}

private static final Pattern pGcStart = getGcStartPattern();
private static final Pattern pGcEnd = getGcEndPattern();

// Recognizing StringTable GC callback log lines.

private static final Pattern pCallback =
Pattern.compile("\\[trace\\s*\\]\\[stringtable\\s*\\] Uncleaned items:");

private static boolean matchesPattern(String line, Pattern regex) {
return regex.matcher(line).find();
}

private static boolean matchesStart(String line) {
return matchesPattern(line, pGcStart);
}

private static boolean matchesEnd(String line) {
return matchesPattern(line, pGcEnd);
}

private static boolean matchesCallback(String line) {
return matchesPattern(line, pCallback);
}

// Search the lines for the first GC start log line in lines, starting
// from fromIndex. Returns the index of that line, or -1 if no GC start
// line found. Throws if a callback or GC end line is found first.
private static int findStart(List<String> lines, int fromIndex)
throws Exception
{
for (int i = fromIndex; i < lines.size(); ++i) {
String line = lines.get(i);
if (matchesStart(line)) {
return i;
} else if (matchesEnd(line)) {
fail("End without Start: " + i);
} else if (matchesCallback(line)) {
fail("Callback without Start: " + i);
}
}
return -1;
}

// Search the lines for the first callback log line in lines, starting
// after gcStart. Returns the index of that line, or -1 if no callback
// line is found (concurrent GC could start but not complete). Throws
// if a GC start or GC end log line is found first.
private static int findCallback(List<String> lines, int gcStart)
throws Exception
{
for (int i = gcStart + 1; i < lines.size(); ++i) {
String line = lines.get(i);
if (matchesCallback(line)) {
return i;
} else if (matchesEnd(line)) {
fail("Missing Callback in [" + gcStart + ", " + i + "]");
} else if (matchesStart(line)) {
fail("Two Starts: " + gcStart + ", " + i);
}
}
return fail("Missing Callback for Start: " + gcStart);
}

// Search the lines for the first GC end log line in lines, starting
// after callback. Returns the index of that line, or -1 if no GC end
// line is found (concurrent GC could start but not complete). Throws
// if a GC start or a callback log line is found first.
private static int findEnd(List<String> lines, int gcStart, int callback)
throws Exception
{
for (int i = callback + 1; i < lines.size(); ++i) {
String line = lines.get(i);
if (matchesEnd(line)) {
return i;
} else if (matchesStart(line)) {
fail("Missing End for Start: " + gcStart + " at " + i);
} else if (matchesCallback(line)) {
fail("Multiple Callbacks for Start: " + gcStart + " at " + i);
}
}
return -1;
}

private static int check(List<String> lines, int fromIndex) throws Exception {
int gcStart = findStart(lines, fromIndex);
if (gcStart < 0) return -1;
int callback = findCallback(lines, gcStart);
if (callback < 0) return -1;
int gcEnd = findEnd(lines, gcStart, callback);
if (gcEnd < 0) return -1;
return gcEnd + 1;
}

private static void checkOutput(OutputAnalyzer output) throws Exception {
List<String> lines = output.asLines();
int count = -1;
int i = 0;
try {
for ( ; i >= 0; i = check(lines, i)) { ++count; }
} finally {
if (i < 0) {
System.out.println("Output check passed with " + count + " GCs");
} else {
System.out.println("--- Output check failed: " + count + " -----");
System.out.println(output.getOutput());
}
}
}

static class Tester {
private static volatile boolean stopRequested = false;

private static final TimeUnit durationUnits = TimeUnit.SECONDS;

public static void main(String[] args) throws Exception {
long duration = Long.parseLong(args[0]);
runTest(duration);
}

public static void runTest(long duration) throws Exception {
ScheduledExecutorService scheduler =
Executors.newScheduledThreadPool(1);
try {
ScheduledFuture<?> stopper =
scheduler.schedule(() -> stopRequested = true,
duration,
durationUnits);
try {
stringMaker(10000000, 100000, 50000);
} finally {
stopper.cancel(false);
}
} finally {
scheduler.shutdownNow();
}
}

private static void stringMaker(int maxSize, int growStep, int shrinkStep)
throws Exception
{
long stringNum = 0;
while (true) {
LinkedList<String> list = new LinkedList<String>();
for (int i = 0; i < maxSize; ++i, ++stringNum) {
if (stopRequested) {
return;
}
if ((i != 0) && ((i % growStep) == 0)) {
list.subList(0, shrinkStep).clear();
}
list.push(Long.toString(stringNum).intern());
}
// For generational collectors, try to move current list
// contents to old-gen before dropping the list.
System.gc();
}
}
}
}