8227054: ServiceThread needs to know about all OopStorage objects

8227053: ServiceThread cleanup of OopStorage is missing some

OopStorageSet provides named access and iteration.

Reviewed-by: eosterlund, pliden, coleenp
This commit is contained in:
Kim Barrett 2019-08-21 18:42:30 -04:00
parent de8d01d4d3
commit 1acad37ee6
28 changed files with 721 additions and 358 deletions

@ -30,6 +30,7 @@
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
@ -86,7 +87,6 @@ volatile bool StringTable::_has_work = false;
volatile bool StringTable::_needs_rehashing = false;
volatile size_t StringTable::_uncleaned_items_count = 0;
OopStorage* StringTable::_weak_handles = NULL;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
@ -206,9 +206,6 @@ static size_t ceil_log2(size_t val) {
}
void StringTable::create_table() {
_weak_handles = new OopStorage("StringTable weak",
StringTableWeakAlloc_lock,
StringTableWeakActive_lock);
size_t start_size_log_2 = ceil_log2(StringTableSize);
_current_size = ((size_t)1) << start_size_log_2;
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
@ -388,7 +385,7 @@ oop StringTable::do_intern(Handle string_or_null_h, const jchar* name,
void StringTable::oops_do(OopClosure* f) {
assert(f != NULL, "No closure");
_weak_handles->oops_do(f);
OopStorageSet::string_table_weak()->oops_do(f);
}
// Concurrent work

@ -25,7 +25,6 @@
#ifndef SHARE_CLASSFILE_STRINGTABLE_HPP
#define SHARE_CLASSFILE_STRINGTABLE_HPP
#include "gc/shared/oopStorage.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/oop.hpp"
@ -33,6 +32,7 @@
#include "utilities/tableStatistics.hpp"
class CompactHashtableWriter;
class JavaThread;
class SerializeClosure;
class StringTable;
@ -51,8 +51,6 @@ class StringTable : public CHeapObj<mtSymbol>{
// Set if one bucket is out of balance due to hash algorithm deficiency
static volatile bool _needs_rehashing;
static OopStorage* _weak_handles;
static void grow(JavaThread* jt);
static void clean_dead_entries(JavaThread* jt);
@ -78,8 +76,6 @@ class StringTable : public CHeapObj<mtSymbol>{
static size_t table_size();
static TableStatistics get_table_statistics();
static OopStorage* weak_storage() { return _weak_handles; }
static void create_table();
static void do_concurrent_work(JavaThread* jt);

@ -47,6 +47,7 @@
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
@ -114,10 +115,6 @@ bool SystemDictionary::_has_checkPackageAccess = false;
const int defaultProtectionDomainCacheSize = 1009;
OopStorage* SystemDictionary::_vm_global_oop_storage = NULL;
OopStorage* SystemDictionary::_vm_weak_oop_storage = NULL;
// ----------------------------------------------------------------------------
// Java-level SystemLoader and PlatformLoader
@ -1855,7 +1852,7 @@ void SystemDictionary::oops_do(OopClosure* f, bool include_handles) {
invoke_method_table()->oops_do(f);
if (include_handles) {
vm_global_oop_storage()->oops_do(f);
OopStorageSet::vm_global()->oops_do(f);
}
}
@ -2896,25 +2893,3 @@ int SystemDictionaryDCmd::num_arguments() {
return 0;
}
}
void SystemDictionary::initialize_oop_storage() {
_vm_global_oop_storage =
new OopStorage("VM Global Oop Handles",
VMGlobalAlloc_lock,
VMGlobalActive_lock);
_vm_weak_oop_storage =
new OopStorage("VM Weak Oop Handles",
VMWeakAlloc_lock,
VMWeakActive_lock);
}
OopStorage* SystemDictionary::vm_global_oop_storage() {
assert(_vm_global_oop_storage != NULL, "Uninitialized");
return _vm_global_oop_storage;
}
OopStorage* SystemDictionary::vm_weak_oop_storage() {
assert(_vm_weak_oop_storage != NULL, "Uninitialized");
return _vm_weak_oop_storage;
}

@ -84,7 +84,6 @@ class SymbolPropertyTable;
class ProtectionDomainCacheTable;
class ProtectionDomainCacheEntry;
class GCTimer;
class OopStorage;
#define WK_KLASS_ENUM_NAME(kname) kname##_knum
@ -349,7 +348,7 @@ public:
// Applies "f->do_oop" to all root oops in the system dictionary.
// If include_handles is true (the default), then the handles in the
// storage object returned by vm_global_oop_storage() are included.
// vm_global OopStorage object are included.
static void oops_do(OopClosure* f, bool include_handles = true);
// System loader lock
@ -565,10 +564,6 @@ public:
// ProtectionDomain cache
static ProtectionDomainCacheTable* _pd_cache_table;
// VM OopStorage objects.
static OopStorage* _vm_global_oop_storage;
static OopStorage* _vm_weak_oop_storage;
protected:
static void validate_protection_domain(InstanceKlass* klass,
Handle class_loader,
@ -623,10 +618,6 @@ public:
return !m->is_public() && m->method_holder() == SystemDictionary::Object_klass();
}
static void initialize_oop_storage();
static OopStorage* vm_global_oop_storage();
static OopStorage* vm_weak_oop_storage();
protected:
// Setup link to hierarchy
static void add_to_hierarchy(InstanceKlass* k, TRAPS);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -153,6 +153,8 @@ public:
bool concurrent);
~BasicParState();
const OopStorage* storage() const { return _storage; }
template<bool is_const, typename F> void iterate(F f);
static uint default_estimated_thread_count(bool concurrent);
@ -172,6 +174,7 @@ public:
_basic_state(storage, estimated_thread_count, concurrent)
{}
const OopStorage* storage() const { return _basic_state.storage(); }
template<typename F> void iterate(F f);
template<typename Closure> void oops_do(Closure* cl);
};
@ -186,6 +189,7 @@ public:
_basic_state(storage, estimated_thread_count, false)
{}
const OopStorage* storage() const { return _basic_state.storage(); }
template<typename F> void iterate(F f);
template<typename Closure> void oops_do(Closure* cl);
template<typename Closure> void weak_oops_do(Closure* cl);

@ -0,0 +1,94 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Backing array for all storages.  Sized all_count + 1 to include the
// entry at singular_index, which is never filled in and stays NULL.
OopStorage* OopStorageSet::storages[all_count + 1] = {};
// Create the lock for one OopStorage, named "<storage_name> <kind> lock",
// at the given rank.  The lock never performs safepoint checks
// (Mutex::_safepoint_check_never).
static Mutex* make_oopstorage_mutex(const char* storage_name,
                                    const char* kind,
                                    int rank) {
  char buffer[256];
  os::snprintf(buffer, sizeof(buffer), "%s %s lock", storage_name, kind);
  return new PaddedMutex(rank, buffer, true, Mutex::_safepoint_check_never);
}
// Create one OopStorage with freshly allocated "alloc" and "active" locks.
// The active lock is ranked one below the alloc lock.
static OopStorage* make_oopstorage(const char* name) {
  Mutex* alloc_lock = make_oopstorage_mutex(name, "alloc", Mutex::oopstorage);
  Mutex* active_lock =
    make_oopstorage_mutex(name, "active", Mutex::oopstorage - 1);
  return new OopStorage(name, alloc_lock, active_lock);
}
// One-time creation of every OopStorage in the set.  Reached via
// oopstorage_init(), the befriended entry point declared in the header.
void OopStorageSet::initialize() {
  storages[jni_global_index] = make_oopstorage("JNI global");
  storages[vm_global_index] = make_oopstorage("VM global");
  storages[jni_weak_index] = make_oopstorage("JNI weak");
  storages[vm_weak_index] = make_oopstorage("VM weak");
  storages[string_table_weak_index] = make_oopstorage("StringTable weak");
  storages[resolved_method_table_weak_index] =
    make_oopstorage("ResolvedMethodTable weak");

  // Ensure we have all of them.
  STATIC_ASSERT(all_count == 6);
  // The singular slot is deliberately left NULL; every real slot is filled.
  assert(storages[singular_index] == NULL, "postcondition");
#ifdef ASSERT
  for (uint i = all_start; i < all_end; ++i) {
    assert(storages[i] != NULL, "postcondition");
  }
#endif // ASSERT
}
// Global entry point (a friend of OopStorageSet) that creates all of the
// storages exactly once.
void oopstorage_init() {
  OopStorageSet::initialize();
}
#ifdef ASSERT
// Debug-only: check that the indexed storage has been created.
void OopStorageSet::verify_initialized(uint index) {
  assert(storages[index] != NULL, "oopstorage_init not yet called");
}
// Debug-only: check this is not a default-constructed (singular) iterator.
void OopStorageSet::Iterator::verify_nonsingular() const {
  assert(_category != singular, "precondition");
}
// Debug-only: check both iterators are non-singular and over the same
// category, so comparing their indices is meaningful.
void OopStorageSet::Iterator::verify_category_match(const Iterator& other) const {
  verify_nonsingular();
  assert(_category == other._category, "precondition");
}
// Debug-only: check the iterator refers to a storage, i.e. it is
// non-singular and not at its end position.
void OopStorageSet::Iterator::verify_dereferenceable() const {
  verify_nonsingular();
  assert(!is_end(), "precondition");
}
#endif // ASSERT

@ -0,0 +1,184 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_OOPSTORAGESET_HPP
#define SHARE_GC_SHARED_OOPSTORAGESET_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
class OopStorage;
// Central registry of all OopStorage objects, providing named accessors
// and iteration over the strong, weak, or complete set of storages.
class OopStorageSet : public AllStatic {
private:
  friend void oopstorage_init();

  // Index layout: a reserved singular slot (for default-constructed
  // iterators), then the strong storages, then the weak storages.  The
  // strong and weak ranges are contiguous so an Iterator can traverse
  // either range, or all storages, by index.
  enum {
    singular_index,             // For singular iterator.

    all_start,

    // Strong
    strong_start = all_start,
    jni_global_index = strong_start,
    vm_global_index,
    strong_end,

    // Weak
    weak_start = strong_end,
    jni_weak_index = weak_start,
    vm_weak_index,
    string_table_weak_index,
    resolved_method_table_weak_index,
    weak_end,

    all_end = weak_end
  };

  static OopStorage* storages[all_end];

  // In debug builds, assert that the indexed slot has been filled in.
  static void verify_initialized(uint index) NOT_DEBUG_RETURN;

  static OopStorage* storage(uint index) {
    verify_initialized(index);
    return storages[index];
  }

  // Creates all of the storages; reached via oopstorage_init().
  static void initialize();

public:
  class Iterator;

  static const uint strong_count = (strong_end - strong_start);
  static const uint weak_count = (weak_end - weak_start);
  static const uint all_count = (all_end - all_start);

  // Iterators over the strong, weak, or complete range of storages.
  static Iterator strong_iterator();
  static Iterator weak_iterator();
  static Iterator all_iterator();

  // Strong
  static OopStorage* jni_global() { return storage(jni_global_index); }
  static OopStorage* vm_global() { return storage(vm_global_index); }

  // Weak
  static OopStorage* jni_weak() { return storage(jni_weak_index); }
  static OopStorage* vm_weak() { return storage(vm_weak_index); }

  static OopStorage* string_table_weak() {
    return storage(string_table_weak_index);
  }

  static OopStorage* resolved_method_table_weak() {
    return storage(resolved_method_table_weak_index);
  }
};
// Iterator over one category (strong, weak, or all) of the storages.
// A default-constructed iterator is "singular": only destruction and
// assignment are valid on it.  In debug builds each iterator carries a
// category tag so mismatched comparisons are caught by assert.
class OopStorageSet::Iterator {
  friend class OopStorageSet;

  enum Category { singular, strong, weak, all };

  uint _index;                  // Current position in OopStorageSet::storages.
  uint _limit;                  // One past the last index of the category.
  DEBUG_ONLY(Category _category;)

  Iterator(uint index, uint limit, Category category) :
    _index(index), _limit(limit) DEBUG_ONLY(COMMA _category(category)) {}

  void verify_nonsingular() const NOT_DEBUG_RETURN;
  void verify_category_match(const Iterator& other) const NOT_DEBUG_RETURN;
  void verify_dereferenceable() const NOT_DEBUG_RETURN;

public:
  // Construct a singular iterator for later assignment.  The only valid
  // operations are destruction and assignment.
  Iterator() :
    _index(singular_index),
    _limit(singular_index)
    DEBUG_ONLY(COMMA _category(singular)) {}

  bool is_end() const {
    verify_nonsingular();
    return _index == _limit;
  }

  // Precondition: both iterators are over the same category.
  bool operator==(const Iterator& other) const {
    verify_category_match(other);
    return _index == other._index;
  }

  bool operator!=(const Iterator& other) const {
    return !operator==(other);
  }

  // Precondition: !is_end().
  OopStorage* operator*() const {
    verify_dereferenceable();
    return storage(_index);
  }

  OopStorage* operator->() const {
    return operator*();
  }

  Iterator& operator++() {
    verify_dereferenceable();
    ++_index;
    return *this;
  }

  Iterator operator++(int) {
    Iterator result = *this;
    operator++();
    return result;
  }

  // begin()/end() support range-based iteration over [*this, limit).
  Iterator begin() const {
    verify_nonsingular();
    return *this;
  }

  Iterator end() const {
    verify_nonsingular();
    Iterator result = *this;
    result._index = _limit;
    return result;
  }
};
// Iterator over just the strong storages.
inline OopStorageSet::Iterator OopStorageSet::strong_iterator() {
  return Iterator(strong_start, strong_end, Iterator::strong);
}
// Iterator over just the weak storages.
inline OopStorageSet::Iterator OopStorageSet::weak_iterator() {
  return Iterator(weak_start, weak_end, Iterator::weak);
}
// Iterator over all storages, strong then weak.
inline OopStorageSet::Iterator OopStorageSet::all_iterator() {
  return Iterator(all_start, all_end, Iterator::all);
}
#endif // SHARE_GC_SHARED_OOPSTORAGESET_HPP

@ -26,6 +26,7 @@
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/weakProcessorPhases.hpp"
#include "gc/shared/weakProcessorPhaseTimes.hpp"
@ -38,11 +39,11 @@
template <typename Container>
class OopsDoAndReportCounts {
public:
void operator()(BoolObjectClosure* is_alive, OopClosure* keep_alive, WeakProcessorPhase phase) {
void operator()(BoolObjectClosure* is_alive, OopClosure* keep_alive, OopStorage* storage) {
Container::reset_dead_counter();
CountingSkippedIsAliveClosure<BoolObjectClosure, OopClosure> cl(is_alive, keep_alive);
WeakProcessorPhases::oop_storage(phase)->oops_do(&cl);
storage->oops_do(&cl);
Container::inc_dead_counter(cl.num_dead() + cl.num_skipped());
Container::finish_dead_counter();
@ -50,17 +51,19 @@ public:
};
void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
if (WeakProcessorPhases::is_serial(phase)) {
WeakProcessorPhases::processor(phase)(is_alive, keep_alive);
WeakProcessorPhases::Iterator pit = WeakProcessorPhases::serial_iterator();
for ( ; !pit.is_end(); ++pit) {
WeakProcessorPhases::processor(*pit)(is_alive, keep_alive);
}
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
if (OopStorageSet::string_table_weak() == *it) {
OopsDoAndReportCounts<StringTable>()(is_alive, keep_alive, *it);
} else if (OopStorageSet::resolved_method_table_weak() == *it) {
OopsDoAndReportCounts<ResolvedMethodTable>()(is_alive, keep_alive, *it);
} else {
if (WeakProcessorPhases::is_stringtable(phase)) {
OopsDoAndReportCounts<StringTable>()(is_alive, keep_alive, phase);
} else if (WeakProcessorPhases::is_resolved_method_table(phase)){
OopsDoAndReportCounts<ResolvedMethodTable>()(is_alive, keep_alive, phase);
} else {
WeakProcessorPhases::oop_storage(phase)->weak_oops_do(is_alive, keep_alive);
}
it->weak_oops_do(is_alive, keep_alive);
}
}
}
@ -86,8 +89,9 @@ uint WeakProcessor::ergo_workers(uint max_workers) {
// and development oriented, so not allocating any threads
// specifically for them is okay.
size_t ref_count = 0;
FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
ref_count += WeakProcessorPhases::oop_storage(phase)->allocation_count();
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
ref_count += it->allocation_count();
}
// +1 to (approx) round up the ref per thread division.
@ -106,14 +110,16 @@ void WeakProcessor::Task::initialize() {
_phase_times->set_active_workers(_nworkers);
}
uint storage_count = WeakProcessorPhases::oop_storage_phase_count;
uint storage_count = WeakProcessorPhases::oopstorage_phase_count;
_storage_states = NEW_C_HEAP_ARRAY(StorageState, storage_count, mtGC);
StorageState* states = _storage_states;
FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
OopStorage* storage = WeakProcessorPhases::oop_storage(phase);
new (states++) StorageState(storage, _nworkers);
StorageState* cur_state = _storage_states;
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it, ++cur_state) {
assert((cur_state - _storage_states) < storage_count, "invariant");
new (cur_state) StorageState(*it, _nworkers);
}
assert((cur_state - _storage_states) == storage_count, "invariant");
StringTable::reset_dead_counter();
ResolvedMethodTable::reset_dead_counter();
}
@ -139,7 +145,7 @@ WeakProcessor::Task::Task(WeakProcessorPhaseTimes* phase_times, uint nworkers) :
WeakProcessor::Task::~Task() {
if (_storage_states != NULL) {
StorageState* states = _storage_states;
FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
for (uint i = 0; i < WeakProcessorPhases::oopstorage_phase_count; ++i) {
states->StorageState::~StorageState();
++states;
}

@ -28,6 +28,7 @@
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/weakProcessorPhases.hpp"
#include "gc/shared/weakProcessorPhaseTimes.hpp"
@ -94,34 +95,39 @@ void WeakProcessor::Task::work(uint worker_id,
"worker_id (%u) exceeds task's configured workers (%u)",
worker_id, _nworkers);
FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
if (WeakProcessorPhases::is_serial(phase)) {
CountingIsAliveClosure<IsAlive> cl(is_alive);
uint serial_index = WeakProcessorPhases::serial_index(phase);
if (_serial_phases_done.try_claim_task(serial_index)) {
WeakProcessorPhaseTimeTracker pt(_phase_times, phase);
WeakProcessorPhases::processor(phase)(&cl, keep_alive);
if (_phase_times != NULL) {
_phase_times->record_phase_items(phase, cl.num_dead(), cl.num_total());
}
}
} else {
CountingSkippedIsAliveClosure<IsAlive, KeepAlive> cl(is_alive, keep_alive);
WeakProcessorPhaseTimeTracker pt(_phase_times, phase, worker_id);
uint storage_index = WeakProcessorPhases::oop_storage_index(phase);
_storage_states[storage_index].oops_do(&cl);
typedef WeakProcessorPhases::Iterator Iterator;
for (Iterator it = WeakProcessorPhases::serial_iterator(); !it.is_end(); ++it) {
WeakProcessorPhase phase = *it;
CountingIsAliveClosure<IsAlive> cl(is_alive);
uint serial_index = WeakProcessorPhases::serial_index(phase);
if (_serial_phases_done.try_claim_task(serial_index)) {
WeakProcessorPhaseTimeTracker pt(_phase_times, phase);
WeakProcessorPhases::processor(phase)(&cl, keep_alive);
if (_phase_times != NULL) {
_phase_times->record_worker_items(worker_id, phase, cl.num_dead(), cl.num_total());
}
if (WeakProcessorPhases::is_stringtable(phase)) {
StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
}
if (WeakProcessorPhases::is_resolved_method_table(phase)) {
ResolvedMethodTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
_phase_times->record_phase_items(phase, cl.num_dead(), cl.num_total());
}
}
}
for (Iterator it = WeakProcessorPhases::oopstorage_iterator(); !it.is_end(); ++it) {
WeakProcessorPhase phase = *it;
CountingSkippedIsAliveClosure<IsAlive, KeepAlive> cl(is_alive, keep_alive);
WeakProcessorPhaseTimeTracker pt(_phase_times, phase, worker_id);
uint oopstorage_index = WeakProcessorPhases::oopstorage_index(phase);
StorageState& cur_state = _storage_states[oopstorage_index];
cur_state.oops_do(&cl);
if (_phase_times != NULL) {
_phase_times->record_worker_items(worker_id, phase, cl.num_dead(), cl.num_total());
}
const OopStorage* cur_storage = cur_state.storage();
if (cur_storage == OopStorageSet::string_table_weak()) {
StringTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
} else if (cur_storage == OopStorageSet::resolved_method_table_weak()) {
ResolvedMethodTable::inc_dead_counter(cl.num_dead() + cl.num_skipped());
}
}
_serial_phases_done.all_tasks_completed(_nworkers);
}

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/weakProcessorPhases.hpp"
#include "gc/shared/weakProcessorPhaseTimes.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
@ -32,17 +33,22 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
static uint phase_index(WeakProcessorPhase phase) {
return WeakProcessorPhases::index(phase);
static uint serial_phase_index(WeakProcessorPhase phase) {
return WeakProcessorPhases::serial_index(phase);
}
static bool is_serial_phase(WeakProcessorPhase phase) {
return WeakProcessorPhases::is_serial(phase);
}
static void assert_oop_storage_phase(WeakProcessorPhase phase) {
assert(WeakProcessorPhases::is_oop_storage(phase),
"Not an oop_storage phase %u", phase_index(phase));
static void assert_serial_phase(WeakProcessorPhase phase) {
assert(is_serial_phase(phase),
"Not a serial phase %u", static_cast<uint>(phase));
}
static void assert_oopstorage_phase(WeakProcessorPhase phase) {
assert(WeakProcessorPhases::is_oopstorage(phase),
"Not an oopstorage phase %u", static_cast<uint>(phase));
}
const double uninitialized_time = -1.0;
@ -64,6 +70,12 @@ static void reset_items(size_t* items, size_t nitems) {
}
}
void WeakProcessorPhaseTimes::reset_phase_data() {
reset_times(_phase_times_sec, ARRAY_SIZE(_phase_times_sec));
reset_items(_phase_dead_items, ARRAY_SIZE(_phase_dead_items));
reset_items(_phase_total_items, ARRAY_SIZE(_phase_total_items));
}
WeakProcessorPhaseTimes::WeakProcessorPhaseTimes(uint max_threads) :
_max_threads(max_threads),
_active_workers(0),
@ -74,20 +86,19 @@ WeakProcessorPhaseTimes::WeakProcessorPhaseTimes(uint max_threads) :
{
assert(_max_threads > 0, "max_threads must not be zero");
reset_times(_phase_times_sec, ARRAY_SIZE(_phase_times_sec));
reset_items(_phase_dead_items, ARRAY_SIZE(_phase_dead_items));
reset_items(_phase_total_items, ARRAY_SIZE(_phase_total_items));
reset_phase_data();
if (_max_threads > 1) {
WorkerDataArray<double>** wpt = _worker_data;
FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(phase) {
const char* description = WeakProcessorPhases::description(phase);
*wpt = new WorkerDataArray<double>(_max_threads, description);
(*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Dead"), DeadItems);
(*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Total"), TotalItems);
wpt++;
}
WorkerDataArray<double>** wpt = _worker_data;
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
assert(size_t(wpt - _worker_data) < ARRAY_SIZE(_worker_data), "invariant");
const char* description = it->name();
*wpt = new WorkerDataArray<double>(_max_threads, description);
(*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Dead"), DeadItems);
(*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Total"), TotalItems);
wpt++;
}
assert(size_t(wpt - _worker_data) == ARRAY_SIZE(_worker_data), "invariant");
}
WeakProcessorPhaseTimes::~WeakProcessorPhaseTimes() {
@ -115,13 +126,9 @@ void WeakProcessorPhaseTimes::set_active_workers(uint n) {
void WeakProcessorPhaseTimes::reset() {
_active_workers = 0;
_total_time_sec = uninitialized_time;
reset_times(_phase_times_sec, ARRAY_SIZE(_phase_times_sec));
reset_items(_phase_dead_items, ARRAY_SIZE(_phase_dead_items));
reset_items(_phase_total_items, ARRAY_SIZE(_phase_total_items));
if (_max_threads > 1) {
for (size_t i = 0; i < ARRAY_SIZE(_worker_data); ++i) {
_worker_data[i]->reset();
}
reset_phase_data();
for (size_t i = 0; i < ARRAY_SIZE(_worker_data); ++i) {
_worker_data[i]->reset();
}
}
@ -136,19 +143,22 @@ void WeakProcessorPhaseTimes::record_total_time_sec(double time_sec) {
}
double WeakProcessorPhaseTimes::phase_time_sec(WeakProcessorPhase phase) const {
assert(is_initialized_time(_phase_times_sec[phase_index(phase)]),
"phase time not set %u", phase_index(phase));
return _phase_times_sec[phase_index(phase)];
assert_serial_phase(phase);
assert(is_initialized_time(_phase_times_sec[serial_phase_index(phase)]),
"phase time not set %u", serial_phase_index(phase));
return _phase_times_sec[serial_phase_index(phase)];
}
void WeakProcessorPhaseTimes::record_phase_time_sec(WeakProcessorPhase phase, double time_sec) {
assert(!is_initialized_time(_phase_times_sec[phase_index(phase)]),
"Already set time for phase %u", phase_index(phase));
_phase_times_sec[phase_index(phase)] = time_sec;
assert_serial_phase(phase);
assert(!is_initialized_time(_phase_times_sec[serial_phase_index(phase)]),
"Already set time for phase %u", serial_phase_index(phase));
_phase_times_sec[serial_phase_index(phase)] = time_sec;
}
void WeakProcessorPhaseTimes::record_phase_items(WeakProcessorPhase phase, size_t num_dead, size_t num_total) {
uint p = phase_index(phase);
assert_serial_phase(phase);
uint p = serial_phase_index(phase);
assert(!is_initialized_items(_phase_dead_items[p]),
"Already set dead items for phase %u", p);
assert(!is_initialized_items(_phase_total_items[p]),
@ -158,41 +168,29 @@ void WeakProcessorPhaseTimes::record_phase_items(WeakProcessorPhase phase, size_
}
WorkerDataArray<double>* WeakProcessorPhaseTimes::worker_data(WeakProcessorPhase phase) const {
assert_oop_storage_phase(phase);
assert(active_workers() > 1, "No worker data when single-threaded");
return _worker_data[WeakProcessorPhases::oop_storage_index(phase)];
assert_oopstorage_phase(phase);
return _worker_data[WeakProcessorPhases::oopstorage_index(phase)];
}
double WeakProcessorPhaseTimes::worker_time_sec(uint worker_id, WeakProcessorPhase phase) const {
assert(worker_id < active_workers(),
"invalid worker id %u for %u", worker_id, active_workers());
if (active_workers() == 1) {
return phase_time_sec(phase);
} else {
return worker_data(phase)->get(worker_id);
}
return worker_data(phase)->get(worker_id);
}
void WeakProcessorPhaseTimes::record_worker_time_sec(uint worker_id,
WeakProcessorPhase phase,
double time_sec) {
if (active_workers() == 1) {
record_phase_time_sec(phase, time_sec);
} else {
worker_data(phase)->set(worker_id, time_sec);
}
worker_data(phase)->set(worker_id, time_sec);
}
void WeakProcessorPhaseTimes::record_worker_items(uint worker_id,
WeakProcessorPhase phase,
size_t num_dead,
size_t num_total) {
if (active_workers() == 1) {
record_phase_items(phase, num_dead, num_total);
} else {
worker_data(phase)->set_or_add_thread_work_item(worker_id, num_dead, DeadItems);
worker_data(phase)->set_or_add_thread_work_item(worker_id, num_total, TotalItems);
}
WorkerDataArray<double>* phase_data = worker_data(phase);
phase_data->set_or_add_thread_work_item(worker_id, num_dead, DeadItems);
phase_data->set_or_add_thread_work_item(worker_id, num_total, TotalItems);
}
static double elapsed_time_sec(Ticks start_time, Ticks end_time) {
@ -219,7 +217,7 @@ WeakProcessorPhaseTimeTracker::WeakProcessorPhaseTimeTracker(WeakProcessorPhaseT
_worker_id(worker_id),
_start_time(Ticks::now())
{
assert_oop_storage_phase(_phase);
assert_oopstorage_phase(_phase);
assert(_times == NULL || worker_id < _times->active_workers(),
"Invalid worker_id %u", worker_id);
}
@ -231,7 +229,7 @@ WeakProcessorPhaseTimeTracker::WeakProcessorPhaseTimeTracker(WeakProcessorPhaseT
_worker_id(0),
_start_time(Ticks::now())
{
assert(is_serial_phase(phase), "Not a serial phase %u", phase_index(phase));
assert_serial_phase(phase);
}
WeakProcessorPhaseTimeTracker::~WeakProcessorPhaseTimeTracker() {
@ -259,6 +257,7 @@ static const char* indent_str(size_t i) {
void WeakProcessorPhaseTimes::log_st_phase(WeakProcessorPhase phase,
uint indent) const {
assert_serial_phase(phase);
log_debug(gc, phases)("%s%s: " TIME_FORMAT,
indent_str(indent),
WeakProcessorPhases::description(phase),
@ -267,12 +266,12 @@ void WeakProcessorPhaseTimes::log_st_phase(WeakProcessorPhase phase,
log_debug(gc, phases)("%s%s: " SIZE_FORMAT,
indent_str(indent + 1),
"Dead",
_phase_dead_items[phase_index(phase)]);
_phase_dead_items[serial_phase_index(phase)]);
log_debug(gc, phases)("%s%s: " SIZE_FORMAT,
indent_str(indent + 1),
"Total",
_phase_total_items[phase_index(phase)]);
_phase_total_items[serial_phase_index(phase)]);
}
void WeakProcessorPhaseTimes::log_mt_phase_summary(WeakProcessorPhase phase,
@ -306,12 +305,12 @@ void WeakProcessorPhaseTimes::log_mt_phase_details(WorkerDataArray<T>* data,
void WeakProcessorPhaseTimes::log_print_phases(uint indent) const {
if (log_is_enabled(Debug, gc, phases)) {
FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
if (is_serial_phase(phase) || (active_workers() == 1)) {
log_st_phase(phase, indent);
} else {
log_mt_phase_summary(phase, indent);
}
typedef WeakProcessorPhases::Iterator Iterator;
for (Iterator it = WeakProcessorPhases::serial_iterator(); !it.is_end(); ++it) {
log_st_phase(*it, indent);
}
for (Iterator it = WeakProcessorPhases::oopstorage_iterator(); !it.is_end(); ++it) {
log_mt_phase_summary(*it, indent);
}
}
}

@ -25,6 +25,7 @@
#ifndef SHARE_GC_SHARED_WEAKPROCESSORPHASETIMES_HPP
#define SHARE_GC_SHARED_WEAKPROCESSORPHASETIMES_HPP
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/weakProcessorPhases.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
@ -43,17 +44,20 @@ class WeakProcessorPhaseTimes : public CHeapObj<mtGC> {
// Total time for weak processor.
double _total_time_sec;
// Total time for each serially processed phase. Entries for phases
// processed by multiple threads are unused, as are entries for
// unexecuted phases.
double _phase_times_sec[WeakProcessorPhases::phase_count];
size_t _phase_dead_items[WeakProcessorPhases::phase_count];
size_t _phase_total_items[WeakProcessorPhases::phase_count];
// Total time and associated items for each serially processed phase.
static const uint phase_data_count = WeakProcessorPhases::serial_phase_count;
// +1 because serial_phase_count == 0 in some build configurations.
// Simpler to always allocate extra space than conditionalize.
double _phase_times_sec[phase_data_count + 1];
size_t _phase_dead_items[phase_data_count + 1];
size_t _phase_total_items[phase_data_count + 1];
void reset_phase_data();
// Per-worker times and linked items, if multiple threads used and the phase was executed.
WorkerDataArray<double>* _worker_data[WeakProcessorPhases::oop_storage_phase_count];
WorkerDataArray<size_t>* _worker_dead_items[WeakProcessorPhases::oop_storage_phase_count];
WorkerDataArray<size_t>* _worker_total_items[WeakProcessorPhases::oop_storage_phase_count];
// Per-worker times and linked items.
static const uint worker_data_count = WeakProcessorPhases::oopstorage_phase_count;
WorkerDataArray<double>* _worker_data[worker_data_count];
WorkerDataArray<size_t>* _worker_dead_items[worker_data_count];
WorkerDataArray<size_t>* _worker_total_items[worker_data_count];
WorkerDataArray<double>* worker_data(WeakProcessorPhase phase) const;
@ -114,7 +118,7 @@ public:
// For tracking possibly parallel phase times (even if processed by
// only one thread).
// Precondition: WeakProcessorPhases::is_oop_storage(phase)
// Precondition: WeakProcessorPhases::is_oopstorage(phase)
// Precondition: worker_id < times->max_threads().
WeakProcessorPhaseTimeTracker(WeakProcessorPhaseTimes* times,
WeakProcessorPhase phase,

@ -23,11 +23,8 @@
*/
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/weakProcessorPhases.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/jniHandles.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
@ -39,53 +36,84 @@
#include "prims/jvmtiExport.hpp"
#endif // INCLUDE_JVMTI
WeakProcessorPhases::Phase WeakProcessorPhases::phase(uint value) {
assert(value < phase_count, "Invalid phase value %u", value);
return static_cast<Phase>(value);
// serial_phase_count is 0 if JFR and JVMTI are both not built,
// requiring some code to be careful to avoid tautological checks
// that some compilers warn about.
#define HAVE_SERIAL_PHASES (INCLUDE_JVMTI || INCLUDE_JFR)
// Convert a serial-phase ordinal (0-based) into the corresponding Phase
// enumerator.  Precondition: value < serial_phase_count.
WeakProcessorPhases::Phase WeakProcessorPhases::serial_phase(uint value) {
#if HAVE_SERIAL_PHASES
assert(value < serial_phase_count, "Invalid serial phase value %u", value);
return static_cast<Phase>(value + serial_phase_start);
#else
// Neither JVMTI nor JFR is built in, so serial_phase_count == 0 and no
// argument is valid; report the bad call and return a placeholder value.
STATIC_ASSERT(serial_phase_count == 0);
fatal("invalid serial phase value %u", value);
return static_cast<Phase>(serial_phase_start);
#endif // HAVE_SERIAL_PHASES
}
uint WeakProcessorPhases::index(Phase phase) {
uint value = static_cast<uint>(phase);
assert(value < phase_count, "Invalid phase %u", value);
return value;
// Convert an oopstorage-phase ordinal (0-based) into the corresponding
// Phase enumerator.  Precondition: value < oopstorage_phase_count.
WeakProcessorPhases::Phase WeakProcessorPhases::oopstorage_phase(uint value) {
assert(value < oopstorage_phase_count, "Invalid oopstorage phase value %u", value);
return static_cast<Phase>(value + oopstorage_phase_start);
}
// File-local helper: the untyped numeric value of a phase, with no
// category (serial vs oopstorage) range checking.
static uint raw_phase_index(WeakProcessorPhases::Phase phase) {
return static_cast<uint>(phase);
}
// Index of phase within the serial-phase category.
// Precondition: is_serial(phase).
uint WeakProcessorPhases::serial_index(Phase phase) {
// NOTE(review): the assert/return pair appears twice below -- this looks
// like interleaved pre- and post-patch lines with diff markers stripped;
// only the raw_phase_index() form should remain.  TODO confirm.
assert(is_serial(phase), "not serial phase %u", index(phase));
return index(phase) - serial_phase_start;
assert(is_serial(phase), "not serial phase %u", raw_phase_index(phase));
return raw_phase_index(phase) - serial_phase_start;
}
uint WeakProcessorPhases::oop_storage_index(Phase phase) {
assert(is_oop_storage(phase), "not oop storage phase %u", index(phase));
return index(phase) - oop_storage_phase_start;
// Index of phase within the oopstorage-phase category.
// Precondition: is_oopstorage(phase).
uint WeakProcessorPhases::oopstorage_index(Phase phase) {
assert(is_oopstorage(phase), "not oopstorage phase %u", raw_phase_index(phase));
return raw_phase_index(phase) - oopstorage_phase_start;
}
// File-local helper: true if phase lies within [start, start + count).
// Unsigned wraparound makes a single comparison reject values below
// start as well as values at or beyond start + count.
static bool is_phase(WeakProcessorPhases::Phase phase, uint start, uint count) {
  uint raw = raw_phase_index(phase);
  return (raw - start) < count;
}
// True if phase is in the serial category.
bool WeakProcessorPhases::is_serial(Phase phase) {
// NOTE(review): this body interleaves pre- and post-patch lines (diff
// markers appear stripped): the INCLUDE_JVMTI || INCLUDE_JFR conditional
// and the index()-based return are the old form; only the
// HAVE_SERIAL_PHASES / is_phase() form should remain.  TODO confirm.
// serial_phase_count is 0 if JFR and JVMTI are both not built,
// making this check with unsigned lhs redundant
#if INCLUDE_JVMTI || INCLUDE_JFR
return (index(phase) - serial_phase_start) < serial_phase_count;
#if HAVE_SERIAL_PHASES
return is_phase(phase, serial_phase_start, serial_phase_count);
#else
STATIC_ASSERT(serial_phase_count == 0);
return false;
#endif
#endif // HAVE_SERIAL_PHASES
}
bool WeakProcessorPhases::is_oop_storage(Phase phase) {
return (index(phase) - oop_storage_phase_start) < oop_storage_phase_count;
// True if phase is in the oopstorage category.
bool WeakProcessorPhases::is_oopstorage(Phase phase) {
return is_phase(phase, oopstorage_phase_start, oopstorage_phase_count);
}
#ifdef ASSERT
// Debug-only sanity checks for Iterator (no-ops in product builds via
// NOT_DEBUG_RETURN on the declarations).
// A singular iterator (default-constructed; _limit == singular_value)
// supports only assignment and destruction.
void WeakProcessorPhases::Iterator::verify_nonsingular() const {
assert(_limit != singular_value, "precondition");
}
// Comparison is only meaningful between iterators of the same category,
// i.e. with identical limits.
void WeakProcessorPhases::Iterator::verify_category_match(const Iterator& other) const {
verify_nonsingular();
assert(_limit == other._limit, "precondition");
}
// Dereference and increment require a position strictly before the end.
void WeakProcessorPhases::Iterator::verify_dereferenceable() const {
verify_nonsingular();
assert(_index < _limit, "precondition");
}
#endif // ASSERT
// Human-readable name of a serial phase, for logging.
// Precondition: is_serial(phase).
const char* WeakProcessorPhases::description(Phase phase) {
switch (phase) {
JVMTI_ONLY(case jvmti: return "JVMTI weak processing";)
JFR_ONLY(case jfr: return "JFR weak processing";)
// NOTE(review): the jni/stringtable/resolved_method_table/vm cases and
// the first "Invalid ..." return below look like pre-patch lines left by
// stripped diff markers; post-patch only serial phases reach here.
case jni: return "JNI weak processing";
case stringtable: return "StringTable weak processing";
case resolved_method_table: return "ResolvedMethodTable weak processing";
case vm: return "VM weak processing";
default:
ShouldNotReachHere();
return "Invalid weak processing phase";
return "Invalid serial weak processing phase";
}
}
@ -98,23 +126,3 @@ WeakProcessorPhases::Processor WeakProcessorPhases::processor(Phase phase) {
return NULL;
}
}
// Map an oopstorage phase to its backing OopStorage instance.
// Precondition: is_oop_storage(phase).
// NOTE(review): per the surrounding patch this per-phase mapping is
// superseded by OopStorageSet; this appears to be removed pre-patch code.
OopStorage* WeakProcessorPhases::oop_storage(Phase phase) {
switch (phase) {
case jni: return JNIHandles::weak_global_handles();
case stringtable: return StringTable::weak_storage();
case resolved_method_table: return ResolvedMethodTable::weak_storage();
case vm: return SystemDictionary::vm_weak_oop_storage();
default:
ShouldNotReachHere();
return NULL;
}
}
// True if phase is the StringTable phase (removed pre-patch code).
bool WeakProcessorPhases::is_stringtable(Phase phase) {
return phase == stringtable;
}
// True if phase is the ResolvedMethodTable phase (removed pre-patch code).
bool WeakProcessorPhases::is_resolved_method_table(Phase phase) {
return phase == resolved_method_table;
}

@ -25,6 +25,7 @@
#ifndef SHARE_GC_SHARED_WEAKPROCESSORPHASES_HPP
#define SHARE_GC_SHARED_WEAKPROCESSORPHASES_HPP
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -35,54 +36,117 @@ class OopStorage;
// Categorization of weak-processing phases: a (possibly empty) run of
// serial phases followed by the OopStorage-backed phases.
// NOTE(review): this declaration interleaves pre- and post-patch member
// lists (diff markers appear stripped): serial_phase_count, phase_count
// and processor() each occur twice, and both old (oop_storage_*) and new
// (oopstorage_*) spellings are present.  TODO confirm against the patch.
class WeakProcessorPhases : AllStatic {
public:
class Iterator;
// Function applied by a serially processed phase.
typedef void (*Processor)(BoolObjectClosure*, OopClosure*);
enum Phase {
// Serial phases.
JVMTI_ONLY(jvmti COMMA)
JFR_ONLY(jfr COMMA)
JVMTI_ONLY(jvmti JFR_ONLY(COMMA))
JFR_ONLY(jfr)
// OopStorage phases.
jni,
stringtable,
resolved_method_table,
vm
// Additional implicit phase values follow for oopstorages.
};
static const uint serial_phase_start = 0;
static const uint serial_phase_count = jni;
static const uint oop_storage_phase_start = serial_phase_count;
static const uint oop_storage_phase_count = (vm + 1) - oop_storage_phase_start;
static const uint phase_count = serial_phase_count + oop_storage_phase_count;
static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1) JFR_ONLY(+ 1);
static const uint oopstorage_phase_start = serial_phase_count;
static const uint oopstorage_phase_count = OopStorageSet::weak_count;
static const uint phase_count = serial_phase_count + oopstorage_phase_count;
// Precondition: value < serial_phase_count
static Phase serial_phase(uint value);
// Precondition: value < oopstorage_phase_count
static Phase oopstorage_phase(uint value);
static Phase phase(uint value);
static uint index(Phase phase);
// Indexes relative to the corresponding phase_start constant.
// Precondition: is_serial(phase) or is_oopstorage(phase) accordingly
static uint serial_index(Phase phase);
static uint oop_storage_index(Phase phase);
static uint oopstorage_index(Phase phase);
// Category tests.
static bool is_serial(Phase phase);
static bool is_oop_storage(Phase phase);
static bool is_oopstorage(Phase phase);
// Iterators over the phases of each category.
static Iterator serial_iterator();
static Iterator oopstorage_iterator();
// Precondition: is_serial(phase)
static const char* description(Phase phase);
static Processor processor(Phase phase); // Precondition: is_serial(phase)
static OopStorage* oop_storage(Phase phase); // Precondition: is_oop_storage(phase)
static bool is_stringtable(Phase phase);
static bool is_resolved_method_table(Phase phase);
// Precondition: is_serial(phase)
static Processor processor(Phase phase);
};
typedef WeakProcessorPhases::Phase WeakProcessorPhase;
#define FOR_EACH_WEAK_PROCESSOR_PHASE(P) \
for (WeakProcessorPhase P = static_cast<WeakProcessorPhase>(0); \
static_cast<uint>(P) < WeakProcessorPhases::phase_count; \
P = static_cast<WeakProcessorPhase>(static_cast<uint>(P) + 1))
// Forward iterator over a contiguous range of phase values, obtained from
// WeakProcessorPhases::serial_iterator() or oopstorage_iterator().
// Dereferencing yields a Phase; begin()/end() enable range-based for.
class WeakProcessorPhases::Iterator {
friend class WeakProcessorPhases;
// NOTE(review): this macro looks like removed pre-patch lines (stripped
// diff markers); it is unrelated to the Iterator class body around it.
#define FOR_EACH_WEAK_PROCESSOR_OOP_STORAGE_PHASE(P) \
for (WeakProcessorPhase P = static_cast<WeakProcessorPhase>(WeakProcessorPhases::oop_storage_phase_start); \
static_cast<uint>(P) < (WeakProcessorPhases::oop_storage_phase_start + \
WeakProcessorPhases::oop_storage_phase_count); \
P = static_cast<WeakProcessorPhase>(static_cast<uint>(P) + 1))
// Current position and one-past-the-end limit, as raw phase indexes.
uint _index;
uint _limit;
Iterator(uint index, uint limit) : _index(index), _limit(limit) {}
// Sentinel marking a default-constructed (singular) iterator.
static const uint singular_value = UINT_MAX;
// Debug-only checks; no-ops in product builds.
void verify_nonsingular() const NOT_DEBUG_RETURN;
void verify_category_match(const Iterator& other) const NOT_DEBUG_RETURN;
void verify_dereferenceable() const NOT_DEBUG_RETURN;
public:
// Construct a singular iterator for later assignment. The only valid
// operations are destruction and assignment.
Iterator() : _index(singular_value), _limit(singular_value) {}
bool is_end() const {
verify_nonsingular();
return _index == _limit;
}
bool operator==(const Iterator& other) const {
verify_category_match(other);
return _index == other._index;
}
bool operator!=(const Iterator& other) const {
return !operator==(other);
}
Phase operator*() const {
verify_dereferenceable();
return static_cast<Phase>(_index);
}
// Phase doesn't have members, so no operator->().
Iterator& operator++() {
verify_dereferenceable();
++_index;
return *this;
}
Iterator operator++(int) {
verify_dereferenceable();
return Iterator(_index++, _limit);
}
// Range-based for support: a copy of this iterator and its end position.
Iterator begin() const {
verify_nonsingular();
return *this;
}
Iterator end() const {
verify_nonsingular();
return Iterator(_limit, _limit);
}
};
// Iterator over the serial phases (may be an empty range).
inline WeakProcessorPhases::Iterator WeakProcessorPhases::serial_iterator() {
return Iterator(serial_phase_start, serial_phase_start + serial_phase_count);
}
// Iterator over the oopstorage-backed phases.
inline WeakProcessorPhases::Iterator WeakProcessorPhases::oopstorage_iterator() {
return Iterator(oopstorage_phase_start, oopstorage_phase_start + oopstorage_phase_count);
}
#endif // SHARE_GC_SHARED_WEAKPROCESSORPHASES_HPP

@ -30,6 +30,7 @@
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrierSetNMethod.hpp"
#include "gc/z/zGlobals.hpp"
@ -43,7 +44,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/atomic.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/synchronizer.hpp"
@ -265,8 +265,8 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_ex
}
ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) :
_jni_handles_iter(JNIHandles::global_handles()),
_vm_handles_iter(SystemDictionary::vm_global_oop_storage()),
_jni_handles_iter(OopStorageSet::jni_global()),
_vm_handles_iter(OopStorageSet::vm_global()),
_cld_claim(cld_claim),
_jni_handles(this),
_vm_handles(this),
@ -337,10 +337,10 @@ void ZWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
}
ZConcurrentWeakRootsIterator::ZConcurrentWeakRootsIterator() :
_vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
_jni_weak_handles_iter(JNIHandles::weak_global_handles()),
_string_table_iter(StringTable::weak_storage()),
_resolved_method_table_iter(ResolvedMethodTable::weak_storage()),
_vm_weak_handles_iter(OopStorageSet::vm_weak()),
_jni_weak_handles_iter(OopStorageSet::jni_weak()),
_string_table_iter(OopStorageSet::string_table_weak()),
_resolved_method_table_iter(OopStorageSet::resolved_method_table_weak()),
_vm_weak_handles(this),
_jni_weak_handles(this),
_string_table(this),

@ -25,13 +25,13 @@
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jvmci/jvmci.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/metadataHandleBlock.hpp"
#include "memory/universe.hpp"
OopStorage* JVMCI::_object_handles = NULL;
MetadataHandleBlock* JVMCI::_metadata_handles = NULL;
JVMCIRuntime* JVMCI::_compiler_runtime = NULL;
JVMCIRuntime* JVMCI::_java_runtime = NULL;
@ -58,7 +58,6 @@ void JVMCI::initialize_compiler(TRAPS) {
}
void JVMCI::initialize_globals() {
_object_handles = SystemDictionary::vm_global_oop_storage();
_metadata_handles = MetadataHandleBlock::allocate_block();
if (UseJVMCINativeLibrary) {
// There are two runtimes.
@ -70,9 +69,9 @@ void JVMCI::initialize_globals() {
}
}
OopStorage* JVMCI::object_handles() {
assert(_object_handles != NULL, "Uninitialized");
return _object_handles;
// Handles to objects in the Hotspot heap.
static OopStorage* object_handles() {
return OopStorageSet::vm_global();
}
jobject JVMCI::make_global(const Handle& obj) {

@ -45,11 +45,6 @@ class JVMCI : public AllStatic {
friend class JVMCIEnv;
private:
// Handles to objects in the HotSpot heap.
static OopStorage* _object_handles;
static OopStorage* object_handles();
// Handles to Metadata objects.
static MetadataHandleBlock* _metadata_handles;

@ -670,8 +670,6 @@ jint universe_init() {
Universe::initialize_tlab();
SystemDictionary::initialize_oop_storage();
Metaspace::global_initialize();
// Initialize performance counters for metaspaces

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,26 +23,24 @@
*/
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"
template <> OopStorage* WeakHandle<vm_class_loader_data>::get_storage() {
return SystemDictionary::vm_weak_oop_storage();
return OopStorageSet::vm_weak();
}
template <> OopStorage* WeakHandle<vm_string_table_data>::get_storage() {
return StringTable::weak_storage();
return OopStorageSet::string_table_weak();
}
template <> OopStorage* WeakHandle<vm_resolved_method_table_data>::get_storage() {
return ResolvedMethodTable::weak_storage();
return OopStorageSet::resolved_method_table_weak();
}
template <WeakHandleType T>
@ -50,7 +48,9 @@ WeakHandle<T> WeakHandle<T>::create(Handle obj) {
assert(obj() != NULL, "no need to create weak null oop");
oop* oop_addr = get_storage()->allocate();
if (oop_addr == NULL) {
vm_exit_out_of_memory(sizeof(oop*), OOM_MALLOC_ERROR, "Unable to create new weak oop handle in OopStorage");
vm_exit_out_of_memory(sizeof(oop*), OOM_MALLOC_ERROR,
"Unable to create new weak oop handle in OopStorage %s",
get_storage()->name());
}
// Create WeakHandle with address returned and store oop into it.
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_addr, obj());

@ -25,13 +25,14 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/handles.inline.hpp"
@ -90,7 +91,6 @@ class ResolvedMethodTableConfig : public AllStatic {
static ResolvedMethodTableHash* _local_table = NULL;
static size_t _current_size = (size_t)1 << ResolvedMethodTableSizeLog;
OopStorage* ResolvedMethodTable::_weak_handles = NULL;
volatile bool ResolvedMethodTable::_has_work = false;
volatile size_t _items_count = 0;
@ -98,9 +98,6 @@ volatile size_t _uncleaned_items_count = 0;
void ResolvedMethodTable::create_table() {
_local_table = new ResolvedMethodTableHash(ResolvedMethodTableSizeLog, END_SIZE, GROW_HINT);
_weak_handles = new OopStorage("ResolvedMethodTable weak",
ResolvedMethodTableWeakAlloc_lock,
ResolvedMethodTableWeakActive_lock);
log_trace(membername, table)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, ResolvedMethodTableSizeLog);
}

@ -25,7 +25,6 @@
#ifndef SHARE_PRIMS_RESOLVEDMETHODTABLE_HPP
#define SHARE_PRIMS_RESOLVEDMETHODTABLE_HPP
#include "gc/shared/oopStorage.hpp"
#include "memory/allocation.hpp"
#include "oops/symbol.hpp"
#include "oops/weakHandle.hpp"
@ -34,9 +33,8 @@ class ResolvedMethodTable;
class ResolvedMethodTableConfig;
class ResolvedMethodTable : public AllStatic {
static OopStorage* _weak_handles;
static volatile bool _has_work;
public:
// Initialization
static void create_table();
@ -54,9 +52,6 @@ public:
// Cleaning
static bool has_work() { return _has_work; }
// GC Support - Backing storage for the oop*s
static OopStorage* weak_storage() { return _weak_handles; }
// Cleaning and table management
static double get_load_factor();

@ -50,6 +50,7 @@
void check_ThreadShadow();
void eventlog_init();
void mutex_init();
void oopstorage_init();
void chunkpool_init();
void perfMemory_init();
void SuspendibleThreadSet_init();
@ -98,6 +99,7 @@ void vm_init_globals() {
basic_types_init();
eventlog_init();
mutex_init();
oopstorage_init();
chunkpool_init();
perfMemory_init();
SuspendibleThreadSet_init();

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
@ -36,17 +37,21 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
// File-local accessor for the JNI global-handle storage in OopStorageSet.
static OopStorage* global_handles() {
return OopStorageSet::jni_global();
}
// File-local accessor for the JNI weak-global-handle storage in OopStorageSet.
static OopStorage* weak_global_handles() {
return OopStorageSet::jni_weak();
}
// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;
OopStorage* JNIHandles::global_handles() {
assert(_global_handles != NULL, "Uninitialized JNI global handles");
return _global_handles;
}
OopStorage* JNIHandles::weak_global_handles() {
assert(_weak_global_handles != NULL, "Uninitialized JNI weak global handles");
return _weak_global_handles;
// Initialization hook: publish the OopStorageSet-provided storages via
// the JNIHandles static fields (kept for serviceability agent support).
void jni_handles_init() {
JNIHandles::_global_handles = global_handles();
JNIHandles::_weak_global_handles = weak_global_handles();
}
@ -197,16 +202,6 @@ void JNIHandles::weak_oops_do(OopClosure* f) {
}
void JNIHandles::initialize() {
_global_handles = new OopStorage("JNI Global",
JNIGlobalAlloc_lock,
JNIGlobalActive_lock);
_weak_global_handles = new OopStorage("JNI Weak",
JNIWeakAlloc_lock,
JNIWeakActive_lock);
}
inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
}
@ -332,11 +327,6 @@ bool JNIHandles::current_thread_in_native() {
}
void jni_handles_init() {
JNIHandles::initialize();
}
int JNIHandleBlock::_blocks_allocated = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list = NULL;
#ifndef PRODUCT

@ -37,8 +37,10 @@ class Thread;
class JNIHandles : AllStatic {
friend class VMStructs;
private:
// These are used by the serviceability agent.
static OopStorage* _global_handles;
static OopStorage* _weak_global_handles;
friend void jni_handles_init();
inline static bool is_jweak(jobject handle);
inline static oop* jobject_ptr(jobject handle); // NOT jweak!
@ -122,9 +124,6 @@ class JNIHandles : AllStatic {
static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
// Traversal of weak global handles.
static void weak_oops_do(OopClosure* f);
static OopStorage* global_handles();
static OopStorage* weak_global_handles();
};

@ -46,19 +46,7 @@ Mutex* Module_lock = NULL;
Mutex* CompiledIC_lock = NULL;
Mutex* InlineCacheBuffer_lock = NULL;
Mutex* VMStatistic_lock = NULL;
Mutex* JNIGlobalAlloc_lock = NULL;
Mutex* JNIGlobalActive_lock = NULL;
Mutex* JNIWeakAlloc_lock = NULL;
Mutex* JNIWeakActive_lock = NULL;
Mutex* StringTableWeakAlloc_lock = NULL;
Mutex* StringTableWeakActive_lock = NULL;
Mutex* JNIHandleBlockFreeList_lock = NULL;
Mutex* VMGlobalAlloc_lock = NULL;
Mutex* VMGlobalActive_lock = NULL;
Mutex* VMWeakAlloc_lock = NULL;
Mutex* VMWeakActive_lock = NULL;
Mutex* ResolvedMethodTableWeakAlloc_lock = NULL;
Mutex* ResolvedMethodTableWeakActive_lock = NULL;
Mutex* JmethodIdCreation_lock = NULL;
Mutex* JfieldIdCreation_lock = NULL;
Monitor* JNICritical_lock = NULL;
@ -214,18 +202,6 @@ void mutex_init() {
def(CGC_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // coordinate between fore- and background GC
def(STS_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
def(VMGlobalAlloc_lock , PaddedMutex , oopstorage, true, Monitor::_safepoint_check_never);
def(VMGlobalActive_lock , PaddedMutex , oopstorage-1,true, Monitor::_safepoint_check_never);
def(VMWeakAlloc_lock , PaddedMutex , oopstorage, true, Monitor::_safepoint_check_never);
def(VMWeakActive_lock , PaddedMutex , oopstorage-1,true, Monitor::_safepoint_check_never);
def(StringTableWeakAlloc_lock , PaddedMutex , oopstorage, true, Monitor::_safepoint_check_never);
def(StringTableWeakActive_lock , PaddedMutex , oopstorage-1,true, Monitor::_safepoint_check_never);
def(ResolvedMethodTableWeakAlloc_lock , PaddedMutex , oopstorage, true, Monitor::_safepoint_check_never);
def(ResolvedMethodTableWeakActive_lock , PaddedMutex , oopstorage-1, true, Monitor::_safepoint_check_never);
def(FullGCCount_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
if (UseG1GC) {
def(DirtyCardQ_CBL_mon , PaddedMonitor, access, true, Monitor::_safepoint_check_never);
@ -296,10 +272,6 @@ void mutex_init() {
def(InitCompleted_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
def(VtableStubs_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(Notify_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always);
def(JNIGlobalAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(JNIGlobalActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNIWeakAlloc_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
def(JNIWeakActive_lock , PaddedMutex , nonleaf-1, true, Monitor::_safepoint_check_never);
def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions
def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);

@ -39,19 +39,7 @@ extern Mutex* Module_lock; // a lock on module and package
extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access
extern Mutex* InlineCacheBuffer_lock; // a lock used to guard the InlineCacheBuffer
extern Mutex* VMStatistic_lock; // a lock used to guard statistics count increment
extern Mutex* JNIGlobalAlloc_lock; // JNI global storage allocate list lock
extern Mutex* JNIGlobalActive_lock; // JNI global storage active list lock
extern Mutex* JNIWeakAlloc_lock; // JNI weak storage allocate list lock
extern Mutex* JNIWeakActive_lock; // JNI weak storage active list lock
extern Mutex* StringTableWeakAlloc_lock; // StringTable weak storage allocate list lock
extern Mutex* StringTableWeakActive_lock; // StringTable weak storage active list lock
extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list
extern Mutex* VMGlobalAlloc_lock; // VM Global Handles storage allocate list lock
extern Mutex* VMGlobalActive_lock; // VM Global Handles storage active list lock
extern Mutex* VMWeakAlloc_lock; // VM Weak Handles storage allocate list lock
extern Mutex* VMWeakActive_lock; // VM Weak Handles storage active list lock
extern Mutex* ResolvedMethodTableWeakAlloc_lock; // ResolvedMethodTable weak storage allocate list
extern Mutex* ResolvedMethodTableWeakActive_lock; // ResolvedMethodTable weak storage active list
extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers
extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers
extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in

@ -27,6 +27,8 @@
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -83,22 +85,14 @@ void ServiceThread::initialize() {
}
}
// Delete empty blocks in every OopStorage known to the system.
// NOTE(review): the first signature/loop below duplicates the second --
// apparently interleaved pre- and post-patch lines with diff markers
// stripped; only the OopStorageSet-iterating form should remain.
static void cleanup_oopstorages(OopStorage* const* storages, size_t size) {
for (size_t i = 0; i < size; ++i) {
storages[i]->delete_empty_blocks();
static void cleanup_oopstorages() {
OopStorageSet::Iterator it = OopStorageSet::all_iterator();
for ( ; !it.is_end(); ++it) {
it->delete_empty_blocks();
}
}
void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
OopStorage* const oopstorages[] = {
JNIHandles::global_handles(),
JNIHandles::weak_global_handles(),
StringTable::weak_storage(),
SystemDictionary::vm_global_oop_storage(),
SystemDictionary::vm_weak_oop_storage()
};
const size_t oopstorage_count = ARRAY_SIZE(oopstorages);
while (true) {
bool sensors_changed = false;
bool has_jvmti_events = false;
@ -178,7 +172,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
}
if (oopstorage_work) {
cleanup_oopstorages(oopstorages, oopstorage_count);
cleanup_oopstorages();
}
}
}

@ -0,0 +1,105 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "unittest.hpp"
// GTEST assertions may introduce ODR-uses. Dodge them.
// Identity function: returns its argument by value.
template<typename T>
static T no_odr(T value) {
  return value;
}
// Fill storages[0..size) with the strong storages.  Asserts the expected
// count both at compile time and at run time.
static void fill_strong(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::strong_count));
STATIC_ASSERT(2 == OopStorageSet::strong_count);
storages[0] = OopStorageSet::jni_global();
storages[1] = OopStorageSet::vm_global();
}
// Fill storages[0..size) with the weak storages.  Asserts the expected
// count both at compile time and at run time.
static void fill_weak(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::weak_count));
STATIC_ASSERT(4 == OopStorageSet::weak_count);
storages[0] = OopStorageSet::jni_weak();
storages[1] = OopStorageSet::vm_weak();
storages[2] = OopStorageSet::string_table_weak();
storages[3] = OopStorageSet::resolved_method_table_weak();
}
// Fill storages[0..size) with all storages: strong first, then weak.
static void fill_all(OopStorage** storages, size_t size) {
ASSERT_EQ(size, no_odr(OopStorageSet::all_count));
const uint strong_count = OopStorageSet::strong_count;
fill_strong(storages, strong_count);
fill_weak(storages + strong_count, size - strong_count);
}
// Returns index of s in storages, or size if not found.
static size_t find_storage(OopStorage* s, OopStorage** storages, size_t size) {
  // Use size_t for the index to match the bound and the return type;
  // the original mixed a uint counter with a size_t limit.
  for (size_t i = 0; i < size; ++i) {
    if (s == storages[i]) {
      return i;
    }
  }
  return size;                  // Not found.
}
// Verify that iterating `it` visits each of the `size` entries of
// `storages` exactly once.  Visited entries are NULLed out; afterwards
// every slot must have been cleared.
static void check_iterator(OopStorageSet::Iterator it,
                           OopStorage** storages,
                           size_t size) {
  OopStorageSet::Iterator start = it;
  ASSERT_EQ(start, it);
  for ( ; !it.is_end(); ++it) {
    size_t index = find_storage(*it, storages, size);
    ASSERT_LT(index, size);     // Every visited storage must be expected.
    storages[index] = NULL;     // Also catches duplicate visits.
  }
  ASSERT_NE(start, it);         // The iterator must have advanced.
  const OopStorage* null_storage = NULL;
  // Use size_t for the index to match `size`; the original used uint.
  for (size_t i = 0; i < size; ++i) {
    ASSERT_EQ(null_storage, storages[i]);
  }
}
// Driver: allocate a scratch array, fill it with the expected storages
// for the category, then verify the iterator visits exactly those.
static void test_iterator(uint count,
OopStorageSet::Iterator iterator,
void (*fill)(OopStorage**, size_t)) {
OopStorage** storages = NEW_C_HEAP_ARRAY(OopStorage*, count, mtGC);
fill(storages, count);
check_iterator(iterator, storages, count);
FREE_C_HEAP_ARRAY(OopStorage*, storages);
}
// Instantiate a gtest for one storage category: checks
// OopStorageSet::<kind>_iterator() against <kind>_count and fill_<kind>.
#define TEST_ITERATOR(kind) \
TEST_VM(OopStorageSetTest, PASTE_TOKENS(kind, _iterator)) { \
test_iterator(OopStorageSet::PASTE_TOKENS(kind, _count), \
OopStorageSet::PASTE_TOKENS(kind, _iterator)(), \
&PASTE_TOKENS(fill_, kind)); \
}
// The TEST_ITERATOR expansion ends with '}', so no trailing semicolon is
// needed; the original inconsistently terminated only the first one.
TEST_ITERATOR(strong)
TEST_ITERATOR(weak)
TEST_ITERATOR(all)

@ -159,9 +159,10 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Reference Processing", Level.DEBUG),
// VM internal reference processing
new LogMessageWithLevel("Weak Processing", Level.DEBUG),
new LogMessageWithLevel("JNI weak processing", Level.DEBUG),
new LogMessageWithLevel("StringTable weak processing", Level.DEBUG),
new LogMessageWithLevel("VM weak processing", Level.DEBUG),
new LogMessageWithLevel("JNI weak", Level.DEBUG),
new LogMessageWithLevel("StringTable weak", Level.DEBUG),
new LogMessageWithLevel("ResolvedMethodTable weak", Level.DEBUG),
new LogMessageWithLevel("VM weak", Level.DEBUG),
new LogMessageWithLevelC2OrJVMCIOnly("DerivedPointerTable Update", Level.DEBUG),
new LogMessageWithLevel("Start New Collection Set", Level.DEBUG),