8062943: REDO - Parallelize clearing the next mark bitmap

Reviewed-by: kbarrett, tschatzl
Author: Marcus Larsson
Date:   2014-11-25 11:59:55 +01:00
Parent: 561b51f5ce
Commit: 89dca54c74
6 changed files with 44 additions and 11 deletions

src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -180,9 +180,32 @@ class ClearBitmapHRClosure : public HeapRegionClosure {
   }
 };
 
+class ParClearNextMarkBitmapTask : public AbstractGangTask {
+  ClearBitmapHRClosure* _cl;
+  HeapRegionClaimer     _hrclaimer;
+  bool                  _suspendible; // If the task is suspendible, workers must join the STS.
+
+ public:
+  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
+      _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
+
+  void work(uint worker_id) {
+    if (_suspendible) {
+      SuspendibleThreadSet::join();
+    }
+    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
+    if (_suspendible) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+};
+
 void CMBitMap::clearAll() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
-  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  uint n_workers = g1h->workers()->active_workers();
+  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
+  g1h->workers()->run_task(&task);
   guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
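For readers skimming the hunk above: the old code walked every region on a single thread, while the new ParClearNextMarkBitmapTask hands the same ClearBitmapHRClosure to every worker in the gang and lets a HeapRegionClaimer decide which worker clears which region. Below is a simplified, self-contained sketch of that claim-and-clear pattern. It uses std::thread and an atomic counter in place of HotSpot's work gang and HeapRegionClaimer; every name in it is illustrative, not HotSpot API.

    // Illustrative sketch only -- not HotSpot code. Workers claim whole
    // regions from a shared atomic counter and clear each claimed region's
    // slice of the mark bitmap.
    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <thread>
    #include <vector>

    struct RegionBitmap {
      std::vector<uint64_t> words;  // one mark bit per heap word (simplified)
    };

    void par_clear_all(std::vector<RegionBitmap>& regions, unsigned n_workers) {
      std::atomic<size_t> next{0};  // stands in for HeapRegionClaimer
      auto work = [&regions, &next]() {
        for (;;) {
          size_t idx = next.fetch_add(1, std::memory_order_relaxed);
          if (idx >= regions.size()) {
            return;  // every region has been claimed
          }
          std::fill(regions[idx].words.begin(), regions[idx].words.end(), 0u);
        }
      };
      std::vector<std::thread> gang;
      for (unsigned i = 0; i < n_workers; i++) {
        gang.emplace_back(work);
      }
      for (std::thread& t : gang) {
        t.join();
      }
    }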
@@ -861,7 +884,8 @@ void ConcurrentMark::clearNextBitmap() {
   guarantee(!g1h->mark_in_progress(), "invariant");
 
   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
-  g1h->heap_region_iterate(&cl);
+  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
+  _parallel_workers->run_task(&task);
 
   // Clear the liveness counting data. If the marking has been aborted, the abort()
   // call already did that.

src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

@@ -280,7 +280,6 @@ void ConcurrentMarkThread::run() {
         // We may have aborted just before the remark. Do not bother clearing the
         // bitmap then, as it has been done during mark abort.
         if (!cm()->has_aborted()) {
-          SuspendibleThreadSetJoiner sts;
           _cm->clearNextBitmap();
         } else {
           assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
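This deletion is the counterpart of the first hunk: on this path ParClearNextMarkBitmapTask is constructed with suspendible == true, so each gang worker now joins and leaves the suspendible thread set inside work(), and the concurrent mark thread no longer needs to hold membership on the gang's behalf. For readers unfamiliar with the mechanism, here is a rough standalone model of a suspendible set; it is illustrative only, and it omits the cooperative yielding that HotSpot's real SuspendibleThreadSet provides.

    // Illustrative sketch only -- not HotSpot's SuspendibleThreadSet. A worker
    // joins before touching the heap and leaves afterwards; a suspender (e.g.
    // a safepoint) blocks new joins and waits until all joined workers leave.
    #include <condition_variable>
    #include <mutex>

    class SuspendSet {
      std::mutex _mx;
      std::condition_variable _cv;
      int _joined = 0;
      bool _suspending = false;

     public:
      void join() {
        std::unique_lock<std::mutex> l(_mx);
        _cv.wait(l, [this] { return !_suspending; });
        _joined++;
      }
      void leave() {
        std::lock_guard<std::mutex> l(_mx);
        _joined--;
        _cv.notify_all();
      }
      void suspend_all() {  // called by the suspending thread
        std::unique_lock<std::mutex> l(_mx);
        _suspending = true;
        _cv.wait(l, [this] { return _joined == 0; });
      }
      void resume_all() {
        std::lock_guard<std::mutex> l(_mx);
        _suspending = false;
        _cv.notify_all();
      }
    };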

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -2552,8 +2552,9 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
 void
 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                          uint worker_id,
-                                         HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, worker_id, hrclaimer);
+                                         HeapRegionClaimer *hrclaimer,
+                                         bool concurrent) const {
+  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
 // Clear the cached CSet starting regions and (more importantly)

src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -1380,10 +1380,13 @@ public:
   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
   // to each of the regions, by attempting to claim the region using the
   // HeapRegionClaimer and, if successful, applying the closure to the claimed
-  // region.
+  // region. The concurrent argument should be set to true if iteration is
+  // performed concurrently, during which no assumptions are made for consistent
+  // attributes of the heap regions (as they might be modified while iterating).
   void heap_region_par_iterate(HeapRegionClosure* cl,
                                uint worker_id,
-                               HeapRegionClaimer* hrclaimer) const;
+                               HeapRegionClaimer* hrclaimer,
+                               bool concurrent = false) const;
 
   // Clear the cached cset start regions and (more importantly)
   // the time stamps. Called when we reset the GC time stamp.
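Because the new parameter defaults to false, every existing safepoint-time caller of heap_region_par_iterate() keeps compiling and behaving as before; only iteration that can race with changes to region attributes needs to opt in, as ParClearNextMarkBitmapTask::work() does in the first hunk. In sketch form (the variable names are borrowed from the hunks above, not a complete call site):

    // At a safepoint, region attributes are stable: the default applies and
    // humongous regions keep their special treatment.
    g1h->heap_region_par_iterate(&cl, worker_id, &claimer);        // concurrent = false

    // During a concurrent phase, the humongous flags may change underfoot,
    // so the iterator must not branch on them.
    g1h->heap_region_par_iterate(&cl, worker_id, &claimer, true);  // concurrent = true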

src/share/vm/gc_implementation/g1/heapRegionManager.cpp

@@ -260,7 +260,7 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
   return num_regions;
 }
 
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
@@ -279,7 +279,11 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
+    // However, if the iteration is specified as concurrent, the values for
+    // is_starts_humongous and is_continues_humongous can not be trusted,
+    // and we should just blindly iterate over regions regardless of their
+    // humongous status.
+    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
       continue;
     }
     // OK, try to claim it
@@ -287,7 +291,9 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
       continue;
     }
     // Success!
-    if (r->is_starts_humongous()) {
+    // As mentioned above, special treatment of humongous regions can only be
+    // done if we are iterating non-concurrently.
+    if (!concurrent && r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
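The claiming protocol these hunks rely on is worth spelling out: each worker starts at its own offset into the region array, visits every index once, and claims a region with an atomic compare-and-swap, so every region is processed by exactly one worker no matter how the workers interleave. The following self-contained sketch shows the pattern; the names are illustrative and HotSpot's HeapRegionClaimer differs in detail, but the CAS-based claim is the same idea.

    // Illustrative sketch only -- not HotSpot's HeapRegionClaimer.
    #include <atomic>
    #include <cstdint>
    #include <memory>

    class RegionClaimer {
      const uint32_t _n_regions;
      const uint32_t _n_workers;
      std::unique_ptr<std::atomic<uint8_t>[]> _claims;  // 0 = unclaimed, 1 = claimed

     public:
      RegionClaimer(uint32_t n_regions, uint32_t n_workers)
          : _n_regions(n_regions), _n_workers(n_workers),
            _claims(new std::atomic<uint8_t>[n_regions]) {
        for (uint32_t i = 0; i < n_regions; i++) {
          _claims[i].store(0, std::memory_order_relaxed);
        }
      }

      // Spread workers over the region array so they do not all contend on
      // the same indices at the start of the iteration.
      uint32_t start_region_for_worker(uint32_t worker_id) const {
        return (uint32_t)((uint64_t)_n_regions * worker_id / _n_workers);
      }

      bool is_region_claimed(uint32_t index) const {
        return _claims[index].load(std::memory_order_acquire) != 0;
      }

      bool claim_region(uint32_t index) {
        uint8_t expected = 0;
        return _claims[index].compare_exchange_strong(expected, 1,
                                                      std::memory_order_acq_rel);
      }
    };

    // Every worker visits every index once, wrapping around from its own
    // start offset; the CAS hands each region to exactly one worker.
    template <typename Closure>
    void par_iterate(RegionClaimer& claimer, uint32_t n_regions,
                     uint32_t worker_id, Closure visit) {
      const uint32_t start = claimer.start_region_for_worker(worker_id);
      for (uint32_t i = 0; i < n_regions; i++) {
        const uint32_t index = (start + i) % n_regions;
        if (claimer.is_region_claimed(index) || !claimer.claim_region(index)) {
          continue;  // another worker claimed it first
        }
        visit(index);
      }
    }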

src/share/vm/gc_implementation/g1/heapRegionManager.hpp

@@ -222,7 +222,7 @@ public:
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.