8213574: Deadlock in string table expansion when dumping lots of CDS classes

Reviewed-by: jiangli, iklam, dholmes
Robbin Ehn 2018-11-14 07:50:37 +01:00
parent 61082e6b25
commit 9a79d90cc6
4 changed files with 56 additions and 3 deletions
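
Background for the diffs below: do_scan() serializes with concurrent resizers by taking the table's resize lock, but during CDS dumping these scans run inside a safepoint. If a Java thread was stopped at the safepoint while holding the resize lock in the middle of a table expansion, the VM thread would block on that lock forever. The new do_safepoint_scan() therefore takes no lock at all and instead follows redirected buckets into the new table. A minimal standalone sketch of the distinction, assuming a much-simplified table (ToyTable and its members are illustrative stand-ins, not HotSpot's ConcurrentHashTable):

#include <mutex>
#include <vector>

// Toy stand-in for the hash table; names are illustrative only.
struct ToyTable {
  std::mutex _resize_lock;   // a resizing thread holds this across its work
  std::vector<int> _items;

  // Concurrent scan: takes the resize lock. Calling this at a safepoint can
  // deadlock, because a now-stopped thread may still hold the lock.
  template <typename F>
  void do_scan(F& f) {
    std::lock_guard<std::mutex> g(_resize_lock);
    for (int v : _items) { if (!f(v)) return; }
  }

  // Safepoint scan: every other thread is stopped, so the table may be read
  // lock-free and we never block on a lock a stopped thread holds.
  template <typename F>
  void do_safepoint_scan(F& f) {
    for (int v : _items) { if (!f(v)) return; }
  }
};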

src/hotspot/share/classfile/stringTable.cpp

@@ -847,7 +847,7 @@ void StringTable::copy_shared_string_table(CompactHashtableWriter* writer) {
   assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
   CopyToArchive copy(writer);
-  StringTable::the_table()->_local_table->do_scan(Thread::current(), copy);
+  StringTable::the_table()->_local_table->do_safepoint_scan(copy);
 }
 
 void StringTable::write_to_archive() {

src/hotspot/share/classfile/symbolTable.cpp

@@ -277,7 +277,7 @@ public:
 void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
   assert(DumpSharedSpaces, "called only during dump time");
   MetaspacePointersDo mpd(it);
-  SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd);
+  SymbolTable::the_table()->_local_table->do_safepoint_scan(mpd);
 }
 
 Symbol* SymbolTable::lookup_dynamic(const char* name,
@@ -640,7 +640,7 @@ struct CopyToArchive : StackObj {
 void SymbolTable::copy_shared_symbol_table(CompactHashtableWriter* writer) {
   CopyToArchive copy(writer);
-  SymbolTable::the_table()->_local_table->do_scan(Thread::current(), copy);
+  SymbolTable::the_table()->_local_table->do_safepoint_scan(copy);
 }
 
 void SymbolTable::write_to_archive() {

src/hotspot/share/utilities/concurrentHashTable.hpp

@@ -472,6 +472,12 @@ class ConcurrentHashTable : public CHeapObj<F> {
   template <typename SCAN_FUNC>
   void do_scan(Thread* thread, SCAN_FUNC& scan_f);
 
+  // Visit all items with SCAN_FUNC without any protection.
+  // It will assume there is no other thread accessing this
+  // table during the safepoint. Must be called by the VM thread.
+  template <typename SCAN_FUNC>
+  void do_safepoint_scan(SCAN_FUNC& scan_f);
+
   // Destroying items matching EVALUATE_FUNC, before destroying items
   // DELETE_FUNC is called, if resize lock is obtained. Else returns false.
   template <typename EVALUATE_FUNC, typename DELETE_FUNC>
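
As context for the new declaration: a SCAN_FUNC is any callable taking a pointer to the table's value slot and returning true to continue the scan or false to stop early (the diff's CopyToArchive is one such functor). A minimal hypothetical counter for the symbol table might look like the following; the Symbol** parameter type and the functor name are assumptions for illustration, not part of the commit:

// Hypothetical SCAN_FUNC sketch: counts entries, never stops early.
struct CountSymbols : StackObj {
  size_t _count;
  CountSymbols() : _count(0) {}
  bool operator()(Symbol** value) {   // assumed value-slot type
    _count++;
    return true;  // true: keep scanning; false would stop the scan early
  }
};

// Assumed usage at dump time, inside a safepoint on the VM thread:
//   CountSymbols counter;
//   SymbolTable::the_table()->_local_table->do_safepoint_scan(counter);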

src/hotspot/share/utilities/concurrentHashTable.inline.hpp

@@ -1116,6 +1116,8 @@ template <typename SCAN_FUNC>
 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   do_scan(Thread* thread, SCAN_FUNC& scan_f)
 {
+  assert(!SafepointSynchronize::is_at_safepoint(),
+         "must be outside a safepoint");
   assert(_resize_lock_owner != thread, "Re-size lock held");
   lock_resize_lock(thread);
   do_scan_locked(thread, scan_f);
@@ -1123,6 +1125,49 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   assert(_resize_lock_owner != thread, "Re-size lock held");
 }
 
+template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename SCAN_FUNC>
+inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  do_safepoint_scan(SCAN_FUNC& scan_f)
+{
+  // We only allow this method to be used during a safepoint.
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "must only be called in a safepoint");
+  assert(Thread::current()->is_VM_thread(),
+         "should be in vm thread");
+
+  // We skip all protection here, so no other thread may use this
+  // table at the same time.
+  InternalTable* table = get_table();
+  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
+    Bucket* bucket = table->get_bucket(bucket_it);
+    // If the bucket has a redirect, its items are in the new table.
+    // We must visit them there, since the new table will contain any
+    // concurrent inserts done after this bucket was resized.
+    // If the bucket does not have the redirect flag, all items are in
+    // this table.
+    if (!bucket->have_redirect()) {
+      if (!visit_nodes(bucket, scan_f)) {
+        return;
+      }
+    } else {
+      assert(bucket->is_locked(), "Bucket must be locked.");
+    }
+  }
+  // If there is a paused resize, we also need to visit the already
+  // resized items.
+  table = get_new_table();
+  if (table == NULL) {
+    return;
+  }
+  DEBUG_ONLY(if (table == POISON_PTR) { return; })
+  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
+    Bucket* bucket = table->get_bucket(bucket_it);
+    assert(!bucket->is_locked(), "Bucket must be unlocked.");
+    if (!visit_nodes(bucket, scan_f)) {
+      return;
+    }
+  }
+}
+
 template <typename VALUE, typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
@@ -1142,6 +1187,8 @@ template <typename EVALUATE_FUNC, typename DELETE_FUNC>
 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 {
+  assert(!SafepointSynchronize::is_at_safepoint(),
+         "must be outside a safepoint");
   lock_resize_lock(thread);
   do_bulk_delete_locked(thread, eval_f, del_f);
   unlock_resize_lock(thread);
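
The redirect handling in do_safepoint_scan above is the core of the fix: buckets already moved by a paused resize are skipped in the old table, and their items are picked up in the new table instead, so every item is visited exactly once without taking the resize lock. A self-contained toy model of that control flow (ToyBucket, safepoint_scan, and the tables here are illustrative, not HotSpot code):

#include <cstdio>
#include <vector>

struct ToyBucket {
  bool redirected;           // true: items were moved to the new table
  std::vector<int> items;
};

// Visits every item exactly once, even when a resize was paused midway.
template <typename F>
void safepoint_scan(const std::vector<ToyBucket>& old_table,
                    const std::vector<ToyBucket>* new_table, F f) {
  for (const ToyBucket& b : old_table) {
    if (b.redirected) continue;          // these items live in the new table
    for (int v : b.items) { if (!f(v)) return; }
  }
  if (new_table == nullptr) return;      // no resize was in progress
  for (const ToyBucket& b : *new_table) {
    for (int v : b.items) { if (!f(v)) return; }
  }
}

int main() {
  // A paused resize moved bucket 1's items into the new table.
  std::vector<ToyBucket> old_table = { {false, {1, 2}}, {true, {}} };
  std::vector<ToyBucket> new_table = { {false, {3}}, {false, {4}} };
  safepoint_scan(old_table, &new_table,
                 [](int v) { std::printf("%d\n", v); return true; });
  return 0;
}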