8213996: Remove one of the SparsePRT entry tables

Remove unused functionality in SparsePRT

Reviewed-by: shade, sjohanss
Thomas Schatzl 2018-11-28 11:06:58 +01:00
parent 9a4eed85df
commit 0f0b8626fc
9 changed files with 28 additions and 297 deletions
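For orientation before the diffs: prior to this change SparsePRT kept two RSHashTable pointers, _cur and _next (iterations read _cur, all other operations used _next), plus a shared "expanded list" used to reconcile the two after each pause. After the change a single _table is both read and written, so the reconciliation machinery (HRRSCleanupTask, SparsePRTCleanupTask, the expanded list) can be deleted. Below is a minimal, self-contained C++ sketch of the simplified shape; SimpleTable, SparsePRTModel and the sizes used are illustrative stand-ins, not HotSpot code.

#include <cstddef>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Illustrative stand-in for RSHashTable: maps a region index to a small list
// of card indices. Types and capacities are assumptions for this sketch only.
struct SimpleTable {
  explicit SimpleTable(std::size_t capacity) : _capacity(capacity) {}
  bool should_expand() const { return _entries.size() >= _capacity; }
  std::size_t capacity() const { return _capacity; }
  void add_card(int region, int card) { _entries[region].push_back(card); }

  std::unordered_map<int, std::vector<int>> _entries;
  std::size_t _capacity;
};

// After this commit the sparse PRT owns exactly one table; expansion copies
// the entries into a table of twice the capacity and deletes the old one.
class SparsePRTModel {
  static constexpr std::size_t InitialCapacity = 16;
  SimpleTable* _table;

  void expand() {
    SimpleTable* last = _table;
    _table = new SimpleTable(last->capacity() * 2);
    _table->_entries = last->_entries;   // re-add all existing entries
    delete last;                         // no second table is left behind
  }

 public:
  SparsePRTModel() : _table(new SimpleTable(InitialCapacity)) {}
  ~SparsePRTModel() { delete _table; }

  void add_card(int region, int card) {
    if (_table->should_expand()) {
      expand();
    }
    _table->add_card(region, card);
  }

  // Mirrors the patched SparsePRT::clear(): if the table grew, replace it with
  // one at initial capacity, otherwise just clear it in place.
  void clear() {
    if (_table->capacity() != InitialCapacity) {
      delete _table;
      _table = new SimpleTable(InitialCapacity);
    } else {
      _table->_entries.clear();
    }
  }

  std::size_t capacity() const { return _table->capacity(); }
};

int main() {
  SparsePRTModel prt;
  for (int r = 0; r < 40; r++) prt.add_card(r, 0);
  std::printf("capacity after inserts: %zu\n", prt.capacity());  // grew past 16
  prt.clear();
  std::printf("capacity after clear:   %zu\n", prt.capacity());  // back to 16
  return 0;
}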


@@ -1016,7 +1016,6 @@ void G1CollectedHeap::prepare_heap_for_full_collection() {
// Make sure we'll choose a new allocation region afterwards.
_allocator->release_mutator_alloc_region();
_allocator->abandon_gc_alloc_regions();
g1_rem_set()->cleanupHRRS();
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
@@ -2974,13 +2973,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
evacuation_info.set_collectionset_regions(collection_set()->region_length());
// Make sure the remembered sets are up to date. This needs to be
// done before register_humongous_regions_with_cset(), because the
// remembered sets are used there to choose eager reclaim candidates.
// If the remembered sets are not up to date we might miss some
// entries that need to be handled.
g1_rem_set()->cleanupHRRS();
register_humongous_regions_with_cset();
assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");


@@ -60,7 +60,6 @@
// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;


@@ -1217,18 +1217,15 @@ class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
FreeRegionList* _local_cleanup_list;
uint _old_regions_removed;
uint _humongous_regions_removed;
HRRSCleanupTask* _hrrs_cleanup_task;
public:
G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
FreeRegionList* local_cleanup_list,
HRRSCleanupTask* hrrs_cleanup_task) :
FreeRegionList* local_cleanup_list) :
_g1h(g1h),
_freed_bytes(0),
_local_cleanup_list(local_cleanup_list),
_old_regions_removed(0),
_humongous_regions_removed(0),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
_humongous_regions_removed(0) { }
size_t freed_bytes() { return _freed_bytes; }
const uint old_regions_removed() { return _old_regions_removed; }
@@ -1248,8 +1245,6 @@ class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
hr->clear_cardtable();
_g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
} else {
hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
}
return false;
@@ -1266,16 +1261,11 @@ public:
_g1h(g1h),
_cleanup_list(cleanup_list),
_hrclaimer(n_workers) {
HeapRegionRemSet::reset_for_cleanup_tasks();
}
void work(uint worker_id) {
FreeRegionList local_cleanup_list("Local Cleanup List");
HRRSCleanupTask hrrs_cleanup_task;
G1ReclaimEmptyRegionsClosure cl(_g1h,
&local_cleanup_list,
&hrrs_cleanup_task);
G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
assert(cl.is_complete(), "Shouldn't have aborted!");
@@ -1287,8 +1277,6 @@ public:
_cleanup_list->add_ordered(&local_cleanup_list);
assert(local_cleanup_list.is_empty(), "post-condition");
HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
}
}
};


@@ -512,10 +512,6 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
}
}
void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}
void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
update_rem_set(pss, worker_i);
scan_rem_set(pss, worker_i);;


@@ -86,11 +86,6 @@ public:
// Initialize data that depends on the heap size being known.
void initialize(size_t capacity, uint max_regions);
// This is called to reset dual hash tables after the gc pause
// is finished and the initial hash table is no longer being
// scanned.
void cleanupHRRS();
G1RemSet(G1CollectedHeap* g1h,
G1CardTable* ct,
G1HotCardCache* hot_card_cache);


@@ -598,11 +598,6 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
}
}
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
_sparse_table.do_cleanup_work(hrrs_cleanup_task);
}
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
HeapRegion* hr)
: _bot(bot),
@@ -632,10 +627,6 @@ void HeapRegionRemSet::setup_remset_size() {
guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}
void HeapRegionRemSet::cleanup() {
SparsePRT::cleanup_all();
}
void HeapRegionRemSet::clear(bool only_cardset) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
clear_locked(only_cardset);
@@ -819,18 +810,6 @@ bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
return false;
}
void HeapRegionRemSet::reset_for_cleanup_tasks() {
SparsePRT::reset_for_cleanup_tasks();
}
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
_other_regions.do_cleanup_work(hrrs_cleanup_task);
}
void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
#ifndef PRODUCT
void HeapRegionRemSet::test() {
os::sleep(Thread::current(), (jlong)5000, false);


@@ -42,11 +42,6 @@ class PerRegionTable;
class SparsePRT;
class nmethod;
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
// The "_coarse_map" is a bitmap with one bit for each region, where set
// bits indicate that the corresponding region may contain some pointer
// into the owning region.
@@ -122,6 +117,10 @@ class OtherRegionsTable {
bool contains_reference_locked(OopOrNarrowOopStar from) const;
size_t occ_fine() const;
size_t occ_coarse() const;
size_t occ_sparse() const;
public:
// Create a new remembered set. The given mutex is used to ensure consistency.
OtherRegionsTable(Mutex* m);
@@ -144,9 +143,6 @@ public:
// Returns the number of cards contained in this remembered set.
size_t occupied() const;
size_t occ_fine() const;
size_t occ_coarse() const;
size_t occ_sparse() const;
static jint n_coarsenings() { return _n_coarsenings; }
@@ -159,8 +155,6 @@ public:
// Clear the entire contents of this remembered set.
void clear();
void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
};
class HeapRegionRemSet : public CHeapObj<mtGC> {
@@ -206,15 +200,6 @@ public:
size_t occupied_locked() {
return _other_regions.occupied();
}
size_t occ_fine() const {
return _other_regions.occ_fine();
}
size_t occ_coarse() const {
return _other_regions.occ_coarse();
}
size_t occ_sparse() const {
return _other_regions.occ_sparse();
}
static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
@@ -340,9 +325,6 @@ public:
// consumed by the strong code roots.
size_t strong_code_roots_mem_size();
// Called during a stop-world phase to perform any deferred cleanups.
static void cleanup();
static void invalidate_from_card_cache(uint start_idx, size_t num_regions) {
G1FromCardCache::invalidate(start_idx, num_regions);
}
@@ -351,16 +333,7 @@ public:
static void print_from_card_cache() {
G1FromCardCache::print();
}
#endif
// These are wrappers for the similarly-named methods on
// SparsePRT. Look at sparsePRT.hpp for more details.
static void reset_for_cleanup_tasks();
void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
// Run unit tests.
#ifndef PRODUCT
static void test();
#endif
};


@@ -287,165 +287,55 @@ size_t RSHashTable::mem_size() const {
// ----------------------------------------------------------------------
SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;
void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
// We could expand multiple times in a pause -- only put on list once.
if (sprt->expanded()) return;
sprt->set_expanded(true);
SparsePRT* hd = _head_expanded_list;
while (true) {
sprt->_next_expanded = hd;
SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
if (res == hd) return;
else hd = res;
}
}
SparsePRT* SparsePRT::get_from_expanded_list() {
SparsePRT* hd = _head_expanded_list;
while (hd != NULL) {
SparsePRT* next = hd->next_expanded();
SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
if (res == hd) {
hd->set_next_expanded(NULL);
return hd;
} else {
hd = res;
}
}
return NULL;
}
void SparsePRT::reset_for_cleanup_tasks() {
_head_expanded_list = NULL;
}
void SparsePRT::do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task) {
if (should_be_on_expanded_list()) {
sprt_cleanup_task->add(this);
}
}
void SparsePRT::finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task) {
assert(ParGCRareEvent_lock->owned_by_self(), "pre-condition");
SparsePRT* head = sprt_cleanup_task->head();
SparsePRT* tail = sprt_cleanup_task->tail();
if (head != NULL) {
assert(tail != NULL, "if head is not NULL, so should tail");
tail->set_next_expanded(_head_expanded_list);
_head_expanded_list = head;
} else {
assert(tail == NULL, "if head is NULL, so should tail");
}
}
bool SparsePRT::should_be_on_expanded_list() {
if (_expanded) {
assert(_cur != _next, "if _expanded is true, cur should be != _next");
} else {
assert(_cur == _next, "if _expanded is false, cur should be == _next");
}
return expanded();
}
void SparsePRT::cleanup_all() {
// First clean up all expanded tables so they agree on next and cur.
SparsePRT* sprt = get_from_expanded_list();
while (sprt != NULL) {
sprt->cleanup();
sprt = get_from_expanded_list();
}
}
SparsePRT::SparsePRT() :
_expanded(false), _next_expanded(NULL)
{
_cur = new RSHashTable(InitialCapacity);
_next = _cur;
_table(new RSHashTable(InitialCapacity)) {
}
SparsePRT::~SparsePRT() {
assert(_next != NULL && _cur != NULL, "Inv");
if (_cur != _next) { delete _cur; }
delete _next;
delete _table;
}
size_t SparsePRT::mem_size() const {
// We ignore "_cur" here, because it either = _next, or else it is
// on the deleted list.
return sizeof(SparsePRT) + _next->mem_size();
return sizeof(SparsePRT) + _table->mem_size();
}
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
if (_next->should_expand()) {
if (_table->should_expand()) {
expand();
}
return _next->add_card(region_id, card_index);
return _table->add_card(region_id, card_index);
}
SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
return _next->get_entry(region_id);
return _table->get_entry(region_id);
}
bool SparsePRT::delete_entry(RegionIdx_t region_id) {
return _next->delete_entry(region_id);
return _table->delete_entry(region_id);
}
void SparsePRT::clear() {
// If they differ, _next is bigger then cur, so next has no chance of
// being the initial size.
if (_next != _cur) {
delete _next;
}
if (_cur->capacity() != InitialCapacity) {
delete _cur;
_cur = new RSHashTable(InitialCapacity);
// If the entry table is not at initial capacity, just create a new one.
if (_table->capacity() != InitialCapacity) {
delete _table;
_table = new RSHashTable(InitialCapacity);
} else {
_cur->clear();
_table->clear();
}
_next = _cur;
_expanded = false;
}
void SparsePRT::cleanup() {
// Make sure that the current and next tables agree.
if (_cur != _next) {
delete _cur;
}
_cur = _next;
set_expanded(false);
}
void SparsePRT::expand() {
RSHashTable* last = _next;
_next = new RSHashTable(last->capacity() * 2);
RSHashTable* last = _table;
_table = new RSHashTable(last->capacity() * 2);
for (size_t i = 0; i < last->num_entries(); i++) {
SparsePRTEntry* e = last->entry((int)i);
if (e->valid_entry()) {
_next->add_entry(e);
_table->add_entry(e);
}
}
if (last != _cur) {
delete last;
}
add_to_expanded_list(this);
}
void SparsePRTCleanupTask::add(SparsePRT* sprt) {
assert(sprt->should_be_on_expanded_list(), "pre-condition");
sprt->set_next_expanded(NULL);
if (_tail != NULL) {
_tail->set_next_expanded(sprt);
} else {
_head = sprt;
}
_tail = sprt;
delete last;
}
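A note on the machinery removed in this file: the expanded list was a lock-free stack of SparsePRTs, pushed with a compare-and-swap loop in add_to_expanded_list() and drained in cleanup_all() so that _cur and _next agreed again after a pause. With a single table there is nothing left to reconcile, so the list and its cleanup-task plumbing go away. Below is a small sketch of the push pattern that was deleted, with std::atomic standing in for HotSpot's Atomic::cmpxchg; the Node type is hypothetical and exists only for this sketch.

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for the intrusive list fields SparsePRT used to carry.
struct Node {
  Node* _next_expanded = nullptr;
  bool  _expanded = false;
};

std::atomic<Node*> head_expanded_list{nullptr};

// Lock-free push in the style of the removed SparsePRT::add_to_expanded_list():
// link the node to the current head, then CAS the head to the node, retrying
// if another thread pushed in the meantime.
void add_to_expanded_list(Node* n) {
  if (n->_expanded) return;        // only enqueue a table once per pause
  n->_expanded = true;
  Node* hd = head_expanded_list.load();
  do {
    n->_next_expanded = hd;
  } while (!head_expanded_list.compare_exchange_weak(hd, n));
}

int main() {
  Node a, b;
  add_to_expanded_list(&a);
  add_to_expanded_list(&b);
  int len = 0;
  for (Node* p = head_expanded_list.load(); p != nullptr; p = p->_next_expanded) {
    len++;
  }
  std::printf("expanded list length: %d\n", len);   // prints 2
  return 0;
}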


@@ -37,12 +37,6 @@
// indices of other regions to short sequences of cards in the other region
// that might contain pointers into the owner region.
// These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code. This allows us to allow
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions for deletions, but do not delete
// old versions synchronously.
class SparsePRTEntry: public CHeapObj<mtGC> {
private:
// The type of a card entry.
@@ -158,8 +152,6 @@ public:
// entries to a larger-capacity representation.
bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
bool delete_entry(RegionIdx_t region_id);
bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
@@ -220,16 +212,11 @@ public:
// Concurrent access to a SparsePRT must be serialized by some external mutex.
class SparsePRTIter;
class SparsePRTCleanupTask;
class SparsePRT {
friend class SparsePRTCleanupTask;
friend class SparsePRTIter;
// Iterations are done on the _cur hash table, since they only need to
// see entries visible at the start of a collection pause.
// All other operations are done using the _next hash table.
RSHashTable* _cur;
RSHashTable* _next;
RSHashTable* _table;
enum SomeAdditionalPrivateConstants {
InitialCapacity = 16
@@ -237,26 +224,11 @@ class SparsePRT {
void expand();
bool _expanded;
bool expanded() { return _expanded; }
void set_expanded(bool b) { _expanded = b; }
SparsePRT* _next_expanded;
SparsePRT* next_expanded() { return _next_expanded; }
void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
bool should_be_on_expanded_list();
static SparsePRT* volatile _head_expanded_list;
public:
SparsePRT();
~SparsePRT();
size_t occupied() const { return _next->occupied_cards(); }
size_t occupied() const { return _table->occupied_cards(); }
size_t mem_size() const;
// Attempts to ensure that the given card_index in the given region is in
@@ -277,72 +249,19 @@ public:
// Clear the table, and reinitialize to initial capacity.
void clear();
// Ensure that "_cur" and "_next" point to the same table.
void cleanup();
// Clean up all tables on the expanded list. Called single threaded.
static void cleanup_all();
RSHashTable* cur() const { return _cur; }
static void add_to_expanded_list(SparsePRT* sprt);
static SparsePRT* get_from_expanded_list();
// The purpose of these three methods is to help the GC workers
// during the cleanup pause to recreate the expanded list, purging
// any tables from it that belong to regions that are freed during
// cleanup (if we don't purge those tables, there is a race that
// causes various crashes; see CR 7014261).
//
// We chose to recreate the expanded list, instead of purging
// entries from it by iterating over it, to avoid this serial phase
// at the end of the cleanup pause.
//
// The three methods below work as follows:
// * reset_for_cleanup_tasks() : Nulls the expanded list head at the
// start of the cleanup pause.
// * do_cleanup_work() : Called by the cleanup workers for every
// region that is not free / is being freed by the cleanup
// pause. It creates a list of expanded tables whose head / tail
// are on the thread-local SparsePRTCleanupTask object.
// * finish_cleanup_task() : Called by the cleanup workers after
// they complete their cleanup task. It adds the local list into
// the global expanded list. It assumes that the
// ParGCRareEvent_lock is being held to ensure MT-safety.
static void reset_for_cleanup_tasks();
void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);
bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
return _next->contains_card(region_id, card_index);
return _table->contains_card(region_id, card_index);
}
};
class SparsePRTIter: public RSHashTableIter {
public:
SparsePRTIter(const SparsePRT* sprt) :
RSHashTableIter(sprt->cur()) {}
RSHashTableIter(sprt->_table) { }
bool has_next(size_t& card_index) {
return RSHashTableIter::has_next(card_index);
}
};
// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause. This lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
class SparsePRTCleanupTask {
private:
SparsePRT* _head;
SparsePRT* _tail;
public:
SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }
void add(SparsePRT* sprt);
SparsePRT* head() { return _head; }
SparsePRT* tail() { return _tail; }
};
#endif // SHARE_VM_GC_G1_SPARSEPRT_HPP
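A closing note on the surviving comment "Concurrent access to a SparsePRT must be serialized by some external mutex": with the dual tables and the expanded list gone, thread safety rests entirely on the caller's lock, as HeapRegionRemSet already does with its _m mutex (see the MutexLockerEx use in the HeapRegionRemSet::clear() hunk above). Below is a minimal sketch of that contract, with std::mutex and a std::set standing in for HotSpot's Mutex and the sparse table; RemSetModel and its methods are hypothetical.

#include <cstdio>
#include <mutex>
#include <set>
#include <utility>

// Minimal model of the locking contract: every access to the sparse table goes
// through the owner's mutex, mirroring how HeapRegionRemSet takes _m before
// touching its OtherRegionsTable. std::mutex is an illustrative stand-in for
// HotSpot's Mutex/MutexLockerEx.
class RemSetModel {
  std::mutex _m;
  std::set<std::pair<int, int>> _sparse;   // (region index, card index); illustrative only

 public:
  void add_reference(int region, int card) {
    std::lock_guard<std::mutex> x(_m);     // external serialization required by SparsePRT
    _sparse.insert({region, card});
  }

  bool contains_card(int region, int card) {
    std::lock_guard<std::mutex> x(_m);
    return _sparse.count({region, card}) != 0;
  }
};

int main() {
  RemSetModel rs;
  rs.add_reference(3, 17);
  std::printf("contains: %s\n", rs.contains_card(3, 17) ? "yes" : "no");   // prints yes
  return 0;
}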