6817419: G1: Enable extensive verification for humongous regions
Enabled full verification for humongous regions. Also made sure that VerifyAfterGC works with deferred updates and G1HRRSFlushLogBuffersOnVerify. Reviewed-by: tonyp
This commit is contained in:
parent
ecdb99412d
commit
82619cc8da
@ -145,14 +145,9 @@ void ConcurrentG1Refine::set_pya_restart() {
|
|||||||
if (G1RSBarrierUseQueue) {
|
if (G1RSBarrierUseQueue) {
|
||||||
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
|
||||||
dcqs.abandon_logs();
|
dcqs.abandon_logs();
|
||||||
if (_cg1rThread->do_traversal()) {
|
// Reset the post-yield actions.
|
||||||
_pya = PYA_restart;
|
_pya = PYA_continue;
|
||||||
} else {
|
_last_pya = PYA_continue;
|
||||||
_cg1rThread->set_do_traversal(true);
|
|
||||||
// Reset the post-yield actions.
|
|
||||||
_pya = PYA_continue;
|
|
||||||
_last_pya = PYA_continue;
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
_pya = PYA_restart;
|
_pya = PYA_restart;
|
||||||
}
|
}
|
||||||
|
@ -961,6 +961,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
|||||||
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
|
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
|
||||||
HandleMark hm; // Discard invalid handles created during verification
|
HandleMark hm; // Discard invalid handles created during verification
|
||||||
gclog_or_tty->print(" VerifyAfterGC:");
|
gclog_or_tty->print(" VerifyAfterGC:");
|
||||||
|
prepare_for_verify();
|
||||||
Universe::verify(false);
|
Universe::verify(false);
|
||||||
}
|
}
|
||||||
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
|
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
|
||||||
@ -2135,15 +2136,7 @@ public:
|
|||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
|
guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
|
||||||
"Should be unclaimed at verify points.");
|
"Should be unclaimed at verify points.");
|
||||||
if (r->isHumongous()) {
|
if (!r->continuesHumongous()) {
|
||||||
if (r->startsHumongous()) {
|
|
||||||
// Verify the single H object.
|
|
||||||
oop(r->bottom())->verify();
|
|
||||||
size_t word_sz = oop(r->bottom())->size();
|
|
||||||
guarantee(r->top() == r->bottom() + word_sz,
|
|
||||||
"Only one object in a humongous region");
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
VerifyObjsInRegionClosure not_dead_yet_cl(r);
|
VerifyObjsInRegionClosure not_dead_yet_cl(r);
|
||||||
r->verify(_allow_dirty);
|
r->verify(_allow_dirty);
|
||||||
r->object_iterate(¬_dead_yet_cl);
|
r->object_iterate(¬_dead_yet_cl);
|
||||||
@ -2195,6 +2188,7 @@ public:
|
|||||||
_g1h(g1h), _allow_dirty(allow_dirty) { }
|
_g1h(g1h), _allow_dirty(allow_dirty) { }
|
||||||
|
|
||||||
void work(int worker_i) {
|
void work(int worker_i) {
|
||||||
|
HandleMark hm;
|
||||||
VerifyRegionClosure blk(_allow_dirty, true);
|
VerifyRegionClosure blk(_allow_dirty, true);
|
||||||
_g1h->heap_region_par_iterate_chunked(&blk, worker_i,
|
_g1h->heap_region_par_iterate_chunked(&blk, worker_i,
|
||||||
HeapRegion::ParVerifyClaimValue);
|
HeapRegion::ParVerifyClaimValue);
|
||||||
@ -2713,6 +2707,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
|||||||
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
|
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
|
||||||
HandleMark hm; // Discard invalid handles created during verification
|
HandleMark hm; // Discard invalid handles created during verification
|
||||||
gclog_or_tty->print(" VerifyAfterGC:");
|
gclog_or_tty->print(" VerifyAfterGC:");
|
||||||
|
prepare_for_verify();
|
||||||
Universe::verify(false);
|
Universe::verify(false);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2844,6 +2839,12 @@ void G1CollectedHeap::forget_alloc_region_list() {
|
|||||||
while (_gc_alloc_region_list != NULL) {
|
while (_gc_alloc_region_list != NULL) {
|
||||||
HeapRegion* r = _gc_alloc_region_list;
|
HeapRegion* r = _gc_alloc_region_list;
|
||||||
assert(r->is_gc_alloc_region(), "Invariant.");
|
assert(r->is_gc_alloc_region(), "Invariant.");
|
||||||
|
// We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
|
||||||
|
// newly allocated data in order to be able to apply deferred updates
|
||||||
|
// before the GC is done for verification purposes (i.e to allow
|
||||||
|
// G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the
|
||||||
|
// collection.
|
||||||
|
r->ContiguousSpace::set_saved_mark();
|
||||||
_gc_alloc_region_list = r->next_gc_alloc_region();
|
_gc_alloc_region_list = r->next_gc_alloc_region();
|
||||||
r->set_next_gc_alloc_region(NULL);
|
r->set_next_gc_alloc_region(NULL);
|
||||||
r->set_is_gc_alloc_region(false);
|
r->set_is_gc_alloc_region(false);
|
||||||
@ -3738,7 +3739,9 @@ protected:
|
|||||||
CardTableModRefBS* ctbs() { return _ct_bs; }
|
CardTableModRefBS* ctbs() { return _ct_bs; }
|
||||||
|
|
||||||
void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
|
void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
|
||||||
_g1_rem->par_write_ref(from, p, tid);
|
if (!from->is_survivor()) {
|
||||||
|
_g1_rem->par_write_ref(from, p, tid);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
|
void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
|
||||||
|
@ -716,8 +716,7 @@ public:
|
|||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
if (!r->in_collection_set() &&
|
if (!r->in_collection_set() &&
|
||||||
!r->continuesHumongous() &&
|
!r->continuesHumongous() &&
|
||||||
!r->is_young() &&
|
!r->is_young()) {
|
||||||
!r->is_survivor()) {
|
|
||||||
_update_rs_oop_cl.set_from(r);
|
_update_rs_oop_cl.set_from(r);
|
||||||
UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
|
UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
|
||||||
|
|
||||||
@ -854,7 +853,7 @@ void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
|
|||||||
// before all the cards on the region are dirtied. This is unlikely,
|
// before all the cards on the region are dirtied. This is unlikely,
|
||||||
// and it doesn't happen often, but it can happen. So, the extra
|
// and it doesn't happen often, but it can happen. So, the extra
|
||||||
// check below filters out those cards.
|
// check below filters out those cards.
|
||||||
if (r->is_young() || r->is_survivor()) {
|
if (r->is_young()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// While we are processing RSet buffers during the collection, we
|
// While we are processing RSet buffers during the collection, we
|
||||||
@ -1025,7 +1024,9 @@ void HRInto_G1RemSet::print_summary_info() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
void HRInto_G1RemSet::prepare_for_verify() {
|
void HRInto_G1RemSet::prepare_for_verify() {
|
||||||
if (G1HRRSFlushLogBuffersOnVerify && VerifyBeforeGC && !_g1->full_collection()) {
|
if (G1HRRSFlushLogBuffersOnVerify &&
|
||||||
|
(VerifyBeforeGC || VerifyAfterGC)
|
||||||
|
&& !_g1->full_collection()) {
|
||||||
cleanupHRRS();
|
cleanupHRRS();
|
||||||
_g1->set_refine_cte_cl_concurrency(false);
|
_g1->set_refine_cte_cl_concurrency(false);
|
||||||
if (SafepointSynchronize::is_at_safepoint()) {
|
if (SafepointSynchronize::is_at_safepoint()) {
|
||||||
@ -1036,5 +1037,7 @@ void HRInto_G1RemSet::prepare_for_verify() {
|
|||||||
_cg1r->set_use_cache(false);
|
_cg1r->set_use_cache(false);
|
||||||
updateRS(0);
|
updateRS(0);
|
||||||
_cg1r->set_use_cache(cg1r_use_cache);
|
_cg1r->set_use_cache(cg1r_use_cache);
|
||||||
|
|
||||||
|
assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user