8197569: Refactor eager reclaim for concurrent remembered set rebuilding

Expose information about eager reclaim region selection.

Reviewed-by: sjohanss, sangheki
Author: Thomas Schatzl
Date:   2018-03-26 16:51:41 +02:00
Parent: f8ee0063b1
Commit: af7d8f0d69
4 changed files with 28 additions and 16 deletions
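
In short, the patch centralizes the remembered-set part of humongous-region candidate selection in a new G1CollectedHeap::is_potential_eager_reclaim_candidate() helper, and has eager reclaim notify the concurrent marker through G1ConcurrentMark::humongous_object_eagerly_reclaimed() instead of clearing the next mark bitmap directly in G1FreeHumongousRegionClosure. Below is a minimal standalone sketch of the selection predicate only: the HotSpot flags and HeapRegionRemSet are replaced by stubs so it compiles on its own, and the threshold value is an arbitrary stand-in, not the real G1RSetSparseRegionEntries default.

// Standalone sketch (not HotSpot code): models the candidate test that the
// patch moves into G1CollectedHeap::is_potential_eager_reclaim_candidate().
#include <cstddef>
#include <iostream>

// Stand-ins for the HotSpot flags; the values here are illustrative only.
static const bool   G1EagerReclaimHumongousObjects              = true;
static const bool   G1EagerReclaimHumongousObjectsWithStaleRefs = true;
static const size_t G1RSetSparseRegionEntries                   = 4;  // arbitrary stub value

// Minimal stand-in for HeapRegionRemSet: just an entry count.
struct StubRemSet {
  size_t occupied;
  bool is_empty() const { return occupied == 0; }
  bool occupancy_less_or_equal_than(size_t limit) const { return occupied <= limit; }
};

// Same shape as the new predicate: with stale-refs reclaim enabled, a region may
// still be a candidate while its remembered set is "sparse"; otherwise eager
// reclaim must be enabled at all and the remembered set must be empty.
static bool is_potential_eager_reclaim_candidate(const StubRemSet& rem_set) {
  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
         rem_set.occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
         G1EagerReclaimHumongousObjects && rem_set.is_empty();
}

int main() {
  const StubRemSet empty{0}, sparse{3}, dense{100};
  std::cout << std::boolalpha
            << is_potential_eager_reclaim_candidate(empty)  << ' '    // true
            << is_potential_eager_reclaim_candidate(sparse) << ' '    // true: 3 <= 4
            << is_potential_eager_reclaim_candidate(dense)  << '\n';  // false
  return 0;
}

Note, purely from the diff below: the else branch of the new predicate also checks G1EagerReclaimHumongousObjects, which the removed is_remset_small() did not.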

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -2583,6 +2583,16 @@ size_t G1CollectedHeap::pending_card_num() {
   return buffer_size * buffer_num + extra_cards;
 }
 
+bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
+  // We don't nominate objects with many remembered set entries, on
+  // the assumption that such objects are likely still live.
+  HeapRegionRemSet* rem_set = r->rem_set();
+
+  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
+         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
+         G1EagerReclaimHumongousObjects && rem_set->is_empty();
+}
+
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
@@ -2590,23 +2600,14 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
   DirtyCardQueue _dcq;
 
-  // We don't nominate objects with many remembered set entries, on
-  // the assumption that such objects are likely still live.
-  bool is_remset_small(HeapRegion* region) const {
-    HeapRegionRemSet* const rset = region->rem_set();
-    return G1EagerReclaimHumongousObjectsWithStaleRefs
-           ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
-           : rset->is_empty();
-  }
-
-  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+  bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
     assert(region->is_starts_humongous(), "Must start a humongous object");
 
     oop obj = oop(region->bottom());
 
     // Dead objects cannot be eager reclaim candidates. Due to class
     // unloading it is unsafe to query their classes so we return early.
-    if (heap->is_obj_dead(obj, region)) {
+    if (g1h->is_obj_dead(obj, region)) {
       return false;
     }
@@ -2646,7 +2647,8 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
     // important use case for eager reclaim, and this special handling
     // may reduce needed headroom.
 
-    return obj->is_typeArray() && is_remset_small(region);
+    return obj->is_typeArray() &&
+           g1h->is_potential_eager_reclaim_candidate(region);
   }
 
  public:
@@ -4818,10 +4820,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
                              obj->is_typeArray()
                             );
 
-    // Need to clear mark bit of the humongous object if already set.
-    if (next_bitmap->is_marked(r->bottom())) {
-      next_bitmap->clear(r->bottom());
-    }
+    g1h->concurrent_mark()->humongous_object_eagerly_reclaimed(r);
     _humongous_objects_reclaimed++;
     do {
       HeapRegion* next = g1h->next_region_in_humongous(r);

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -564,6 +564,9 @@ public:
   void gc_prologue(bool full);
   void gc_epilogue(bool full);
 
+  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
+  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
+
   // Modify the reclaim candidate set and test for presence.
   // These are only valid for starts_humongous regions.
   inline void set_humongous_reclaim_candidate(uint region, bool value);

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

@@ -515,6 +515,14 @@ void G1ConcurrentMark::reset() {
   set_concurrent_marking_in_progress();
 }
 
+void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+  assert(SafepointSynchronize::is_at_safepoint(), "May only be called at a safepoint.");
+  // Need to clear mark bit of the humongous object if already set and during a marking cycle.
+  if (_next_mark_bitmap->is_marked(r->bottom())) {
+    _next_mark_bitmap->clear(r->bottom());
+  }
+}
+
 void G1ConcurrentMark::reset_marking_state() {
   _global_mark_stack.set_empty();

src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

@@ -447,6 +447,8 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
+  // Notification for eagerly reclaimed regions to clean up.
+  void humongous_object_eagerly_reclaimed(HeapRegion* r);
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
   // between task-local queues and the global mark stack.