From 9a79d90cc696ea1160f4d99a44a69447d57b353d Mon Sep 17 00:00:00 2001 From: Robbin Ehn Date: Wed, 14 Nov 2018 07:50:37 +0100 Subject: [PATCH] 8213574: Deadlock in string table expansion when dumping lots of CDS classes Reviewed-by: jiangli, iklam, dholmes --- src/hotspot/share/classfile/stringTable.cpp | 2 +- src/hotspot/share/classfile/symbolTable.cpp | 4 +- .../share/utilities/concurrentHashTable.hpp | 6 +++ .../utilities/concurrentHashTable.inline.hpp | 47 +++++++++++++++++++ 4 files changed, 56 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index 2d9f0fe197c..7e21afc50f7 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -847,7 +847,7 @@ void StringTable::copy_shared_string_table(CompactHashtableWriter* writer) { assert(HeapShared::is_heap_object_archiving_allowed(), "must be"); CopyToArchive copy(writer); - StringTable::the_table()->_local_table->do_scan(Thread::current(), copy); + StringTable::the_table()->_local_table->do_safepoint_scan(copy); } void StringTable::write_to_archive() { diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index b55c59b099a..258a64704b4 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -277,7 +277,7 @@ public: void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) { assert(DumpSharedSpaces, "called only during dump time"); MetaspacePointersDo mpd(it); - SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd); + SymbolTable::the_table()->_local_table->do_safepoint_scan(mpd); } Symbol* SymbolTable::lookup_dynamic(const char* name, @@ -640,7 +640,7 @@ struct CopyToArchive : StackObj { void SymbolTable::copy_shared_symbol_table(CompactHashtableWriter* writer) { CopyToArchive copy(writer); - SymbolTable::the_table()->_local_table->do_scan(Thread::current(), 
copy); + SymbolTable::the_table()->_local_table->do_safepoint_scan(copy); } void SymbolTable::write_to_archive() { diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp index f93494b885c..bab7e8c2b8b 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp @@ -472,6 +472,12 @@ class ConcurrentHashTable : public CHeapObj { template void do_scan(Thread* thread, SCAN_FUNC& scan_f); + // Visit all items with SCAN_FUNC without any protection. + // It will assume there is no other thread accessing this + // table during the safepoint. Must be called with VM thread. + template + void do_safepoint_scan(SCAN_FUNC& scan_f); + // Destroying items matching EVALUATE_FUNC, before destroying items // DELETE_FUNC is called, if resize lock is obtained. Else returns false. template diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp index 3e08c3ee40f..9da79e0e4a0 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -1116,6 +1116,8 @@ template inline void ConcurrentHashTable:: do_scan(Thread* thread, SCAN_FUNC& scan_f) { + assert(!SafepointSynchronize::is_at_safepoint(), + "must be outside a safepoint"); assert(_resize_lock_owner != thread, "Re-size lock held"); lock_resize_lock(thread); do_scan_locked(thread, scan_f); @@ -1123,6 +1125,49 @@ inline void ConcurrentHashTable:: assert(_resize_lock_owner != thread, "Re-size lock held"); } +template +template +inline void ConcurrentHashTable:: + do_safepoint_scan(SCAN_FUNC& scan_f) +{ + // We only allow this method to be used during a safepoint. 
+ assert(SafepointSynchronize::is_at_safepoint(), + "must only be called in a safepoint"); + assert(Thread::current()->is_VM_thread(), + "should be in vm thread"); + + // Here we skip protection, + // thus no other thread may use this table at the same time. + InternalTable* table = get_table(); + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { + Bucket* bucket = table->get_bucket(bucket_it); + // If a bucket has a redirect, the items will be in the new table. + // We must visit them there since the new table will contain any + // concurrent inserts done after this bucket was resized. + // If the bucket doesn't have the redirect flag, all items are in this table. + if (!bucket->have_redirect()) { + if(!visit_nodes(bucket, scan_f)) { + return; + } + } else { + assert(bucket->is_locked(), "Bucket must be locked."); + } + } + // If there is a paused resize we also need to visit the already resized items. + table = get_new_table(); + if (table == NULL) { + return; + } + DEBUG_ONLY(if (table == POISON_PTR) { return; }) + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { + Bucket* bucket = table->get_bucket(bucket_it); + assert(!bucket->is_locked(), "Bucket must be unlocked."); + if (!visit_nodes(bucket, scan_f)) { + return; + } + } +} + template + template + inline bool ConcurrentHashTable:: @@ -1142,6 +1187,8 @@ template inline void ConcurrentHashTable:: bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { + assert(!SafepointSynchronize::is_at_safepoint(), + "must be outside a safepoint"); lock_resize_lock(thread); do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread);