8184667: Clean up G1ConcurrentMark files

Fix naming, formatting, access control, remove unused code.

Reviewed-by: sjohanss, pliden

parent 817d6bc039
commit 0757704af2
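The change is mechanical throughout the patch: camelCase members and accessors become snake_case per HotSpot convention, internal accessor calls are replaced by direct field access, and unused members such as G1CMTask::_task_queues and _claimed are deleted. A minimal sketch of the rename pattern on a hypothetical class (illustrative only, not taken from the patch):

    // Before: camelCase field and accessor, as with _cmThread / cmThread().
    class Example {
      int _fooCount;
    public:
      int fooCount() const { return _fooCount; }
    };

    // After: snake_case field and accessor, as with _cm_thread / cm_thread().
    class Example {
      int _foo_count;
    public:
      int foo_count() const { return _foo_count; }
    };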
@@ -95,7 +95,7 @@ public:
     _cm(cm) {}

   void do_void(){
-    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
+    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
   }
 };

@@ -429,7 +429,7 @@ void ConcurrentMarkThread::run_service() {
         G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
         _cm->cleanup_for_next_mark();
       } else {
-        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
+        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
       }
     }

@@ -313,7 +313,7 @@ public:

     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     G1ConcurrentMark* cm = g1h->concurrent_mark();
-    G1CreateLiveDataClosure cl(g1h, cm, cm->nextMarkBitMap(), _live_data);
+    G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
     g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
   }
 };
@@ -1768,7 +1768,7 @@ jint G1CollectedHeap::initialize() {
     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
-  _cmThread = _cm->cmThread();
+  _cmThread = _cm->cm_thread();

   // Now expand into the initial heap size.
   if (!expand(init_byte_size, _workers)) {
@@ -3031,7 +3031,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         g1_policy()->record_collection_pause_start(sample_start_time_sec);

         if (collector_state()->during_initial_mark_pause()) {
-          concurrent_mark()->checkpointRootsInitialPre();
+          concurrent_mark()->checkpoint_roots_initial_pre();
         }

         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
@@ -3102,7 +3102,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
-          concurrent_mark()->checkpointRootsInitialPost();
+          concurrent_mark()->checkpoint_roots_initial_post();
           collector_state()->set_mark_in_progress(true);
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
@@ -4155,7 +4155,7 @@ void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_
   // To avoid spawning task when there is no work to do, check that
   // a concurrent cycle is active and that some references have been
   // discovered.
-  if (concurrent_mark()->cmThread()->during_cycle() &&
+  if (concurrent_mark()->cm_thread()->during_cycle() &&
       ref_processor_cm()->has_discovered_references()) {
     double preserve_cm_referents_start = os::elapsedTime();
     uint no_of_gc_workers = workers()->active_workers();
@@ -4448,7 +4448,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,

   if (G1VerifyBitmaps) {
     MemRegion mr(hr->bottom(), hr->end());
-    concurrent_mark()->clearRangePrevBitmap(mr);
+    concurrent_mark()->clear_range_in_prev_bitmap(mr);
   }

   // Clear the card counts for this region.
@@ -4814,7 +4814,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();

     oop obj = (oop)r->bottom();
-    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
+    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();

     // The following checks whether the humongous object is live are sufficient.
     // The main additional check (in addition to having a reference from the roots
@@ -1361,7 +1361,7 @@ public:
   // is not marked, and c) it is not in an archive region.
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
-      hr->is_obj_dead(obj, _cm->prevMarkBitMap()) &&
+      hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) &&
       !hr->is_archive();
   }

@@ -135,7 +135,7 @@ inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
 }

 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
-  return _cm->nextMarkBitMap()->is_marked((HeapWord*)obj);
+  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
 }

 inline bool G1CollectedHeap::is_in_cset(oop obj) {
@@ -332,16 +332,15 @@ uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {

 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(),
-  _markBitMap2(),
+  _mark_bitmap_1(),
+  _mark_bitmap_2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
   _marking_task_overhead(1.0),
-  _cleanup_list("Cleanup List"),
-
-  _prevMarkBitMap(&_markBitMap1),
-  _nextMarkBitMap(&_markBitMap2),
+  _cleanup_list("Concurrent Mark Cleanup List"),
+  _prev_mark_bitmap(&_mark_bitmap_1),
+  _next_mark_bitmap(&_mark_bitmap_2),

   _global_mark_stack(),
   // _finger set in set_non_marking_state
@@ -363,7 +362,9 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
   // _verbose_level set below

   _init_times(),
-  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
+  _remark_times(),
+  _remark_mark_times(),
+  _remark_weak_ref_times(),
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
@@ -372,18 +373,18 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*

   _completed_initialization(false) {

-  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
-  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
+  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

   // Create & start a ConcurrentMark thread.
-  _cmThread = new ConcurrentMarkThread(this);
-  assert(cmThread() != NULL, "CM Thread should have been created");
-  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
-  if (_cmThread->osthread() == NULL) {
-    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
+  _cm_thread = new ConcurrentMarkThread(this);
+  assert(cm_thread() != NULL, "CM Thread should have been created");
+  assert(cm_thread()->cm() != NULL, "CM Thread should refer to this G1ConcurrentMark");
+  if (_cm_thread->osthread() == NULL) {
+    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }

-  assert(CGC_lock != NULL, "Where's the CGC_lock?");
+  assert(CGC_lock != NULL, "CGC_lock must be initialized");

   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -399,7 +400,6 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
     // if both are set
-    _sleep_factor = 0.0;
-    _marking_task_overhead = 1.0;
   } else if (G1MarkingOverheadPercent > 0) {
     // We will calculate the number of parallel marking threads based
     // on a target overhead with respect to the soft real-time goal
@@ -416,14 +416,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*

     FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
     _sleep_factor = sleep_factor;
     _marking_task_overhead = marking_task_overhead;
   } else {
     // Calculate the number of parallel marking threads by scaling
     // the number of parallel GC threads.
     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
-    _sleep_factor = 0.0;
-    _marking_task_overhead = 1.0;
   }

   assert(ConcGCThreads > 0, "Should have been set");
@@ -432,8 +430,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
   _parallel_marking_threads = ConcGCThreads;
   _max_parallel_marking_threads = _parallel_marking_threads;

-  _parallel_workers = new WorkGang("G1 Marker",
-       _max_parallel_marking_threads, false, true);
+  _parallel_workers = new WorkGang("G1 Marker", _max_parallel_marking_threads, false, true);
   if (_parallel_workers == NULL) {
     vm_exit_during_initialization("Failed necessary allocation.");
   } else {
@@ -443,7 +440,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
   if (FLAG_IS_DEFAULT(MarkStackSize)) {
     size_t mark_stack_size =
       MIN2(MarkStackSizeMax,
-          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
+          MAX2(MarkStackSize, (size_t) (_parallel_marking_threads * TASKQUEUE_SIZE)));
     // Verify that the calculated value for MarkStackSize is in range.
     // It would be nice to use the private utility routine from Arguments.
     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
@@ -489,7 +486,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);

-    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);
+    _tasks[i] = new G1CMTask(i, this, task_queue);

     _accum_task_vtime[i] = 0.0;
   }
@@ -515,11 +512,11 @@ void G1ConcurrentMark::reset() {
   // Reset all the marking data structures and any necessary flags
   reset_marking_state();

-  // We do reset all of them, since different phases will use
+  // We reset all of them, since different phases will use
   // different number of active threads. So, it's easiest to have all
   // of them ready.
   for (uint i = 0; i < _max_worker_id; ++i) {
-    _tasks[i]->reset(_nextMarkBitMap);
+    _tasks[i]->reset(_next_mark_bitmap);
   }

   // we need this to make sure that the flag is on during the evac
@@ -561,8 +558,9 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {

   _concurrent = concurrent;
   // We propagate this to all tasks, not just the active ones.
-  for (uint i = 0; i < _max_worker_id; ++i)
+  for (uint i = 0; i < _max_worker_id; ++i) {
     _tasks[i]->set_concurrent(concurrent);
+  }

   if (concurrent) {
     set_concurrent_marking_in_progress();
@@ -624,7 +622,7 @@ private:
     // as asserts here to minimize their overhead on the product. However, we
     // will have them as guarantees at the beginning / end of the bitmap
     // clearing to get some checking in the product.
-    assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
+    assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
     assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
   }
   assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
@@ -673,7 +671,7 @@ void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool
 void G1ConcurrentMark::cleanup_for_next_mark() {
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
-  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(cm_thread()->during_cycle(), "invariant");

   // We are finishing up the current cycle by clearing the next
   // marking bitmap and getting it ready for the next cycle. During
@@ -681,7 +679,7 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
   // is the case.
   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

-  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
+  clear_bitmap(_next_mark_bitmap, _parallel_workers, true);

   // Clear the live count data. If the marking has been aborted, the abort()
   // call already did that.
@@ -691,13 +689,13 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
   }

   // Repeat the asserts from above.
-  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(cm_thread()->during_cycle(), "invariant");
   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 }

 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
-  clear_bitmap(_prevMarkBitMap, workers, false);
+  clear_bitmap(_prev_mark_bitmap, workers, false);
 }

 class CheckBitmapClearHRClosure : public HeapRegionClosure {
@@ -717,8 +715,8 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
   }
 };

-bool G1ConcurrentMark::nextMarkBitmapIsClear() {
-  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
+  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
   _g1h->heap_region_iterate(&cl);
   return cl.complete();
 }
@@ -731,7 +729,7 @@ public:
   }
 };

-void G1ConcurrentMark::checkpointRootsInitialPre() {
+void G1ConcurrentMark::checkpoint_roots_initial_pre() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

   _has_aborted = false;
@@ -745,7 +743,7 @@ void G1ConcurrentMark::checkpointRootsInitialPre() {
 }


-void G1ConcurrentMark::checkpointRootsInitialPost() {
+void G1ConcurrentMark::checkpoint_roots_initial_post() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

   // Start Concurrent Marking weak-reference discovery.
@@ -853,23 +851,23 @@ public:
       SuspendibleThreadSetJoiner sts_join;

       assert(worker_id < _cm->active_tasks(), "invariant");
-      G1CMTask* the_task = _cm->task(worker_id);
-      the_task->record_start_time();
+      G1CMTask* task = _cm->task(worker_id);
+      task->record_start_time();
       if (!_cm->has_aborted()) {
         do {
           double start_vtime_sec = os::elapsedVTime();
           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

-          the_task->do_marking_step(mark_step_duration_ms,
-                                    true  /* do_termination */,
-                                    false /* is_serial*/);
+          task->do_marking_step(mark_step_duration_ms,
+                                true  /* do_termination */,
+                                false /* is_serial*/);

           double end_vtime_sec = os::elapsedVTime();
           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
           _cm->do_yield_check();

           jlong sleep_time_ms;
-          if (!_cm->has_aborted() && the_task->has_aborted()) {
+          if (!_cm->has_aborted() && task->has_aborted()) {
             sleep_time_ms =
               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
             {
@@ -877,10 +875,10 @@ public:
               os::sleep(Thread::current(), sleep_time_ms, false);
             }
           }
-        } while (!_cm->has_aborted() && the_task->has_aborted());
+        } while (!_cm->has_aborted() && task->has_aborted());
       }
-      the_task->record_end_time();
-      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
+      task->record_end_time();
+      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
     }

     double end_vtime = os::elapsedVTime();
@@ -901,23 +899,23 @@ uint G1ConcurrentMark::calc_parallel_marking_threads() {
   if (!UseDynamicNumberOfGCThreads ||
       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
        !ForceDynamicNumberOfGCThreads)) {
-    n_conc_workers = max_parallel_marking_threads();
+    n_conc_workers = _max_parallel_marking_threads;
   } else {
     n_conc_workers =
-      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
+      AdaptiveSizePolicy::calc_default_active_workers(_max_parallel_marking_threads,
                                                       1, /* Minimum workers */
-                                                      parallel_marking_threads(),
+                                                      _parallel_marking_threads,
                                                       Threads::number_of_non_daemon_threads());
     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
     // that scaling has already gone into "_max_parallel_marking_threads".
   }
-  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
+  assert(n_conc_workers > 0 && n_conc_workers <= _max_parallel_marking_threads,
          "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
-         max_parallel_marking_threads(), n_conc_workers);
+         _max_parallel_marking_threads, n_conc_workers);
   return n_conc_workers;
 }

-void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
+void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
   // Currently, only survivors can be root regions.
   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
   G1RootRegionScanClosure cl(_g1h, this);
@@ -949,7 +947,7 @@ public:
     G1CMRootRegions* root_regions = _cm->root_regions();
     HeapRegion* hr = root_regions->claim_next();
     while (hr != NULL) {
-      _cm->scanRootRegion(hr);
+      _cm->scan_root_region(hr);
       hr = root_regions->claim_next();
     }
   }
@@ -966,7 +964,7 @@ void G1ConcurrentMark::scan_root_regions() {
                   // We distribute work on a per-region basis, so starting
                   // more threads than that is useless.
                   root_regions()->num_root_regions());
-    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+    assert(_parallel_marking_threads <= _max_parallel_marking_threads,
            "Maximum number of marking threads exceeded");

     G1CMRootRegionScanTask task(this);
@@ -1013,10 +1011,10 @@ void G1ConcurrentMark::mark_from_roots() {

   // _g1h has _n_par_threads
   _parallel_marking_threads = calc_parallel_marking_threads();
-  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+  assert(_parallel_marking_threads <= _max_parallel_marking_threads,
          "Maximum number of marking threads exceeded");

-  uint active_workers = MAX2(1U, parallel_marking_threads());
+  uint active_workers = MAX2(1U, _parallel_marking_threads);
   assert(active_workers > 0, "Should have been set");

   // Setting active workers is not guaranteed since fewer
@@ -1028,12 +1026,12 @@ void G1ConcurrentMark::mark_from_roots() {
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);

-  G1CMConcurrentMarkingTask markingTask(this, cmThread());
-  _parallel_workers->run_task(&markingTask);
+  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
+  _parallel_workers->run_task(&marking_task);
   print_stats();
 }

-void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
+void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
@@ -1060,11 +1058,11 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

   double start = os::elapsedTime();

-  checkpointRootsFinalWork();
+  checkpoint_roots_final_work();

   double mark_work_end = os::elapsedTime();

-  weakRefsWork(clear_all_soft_refs);
+  weak_refs_work(clear_all_soft_refs);

   if (has_overflown()) {
     // We overflowed. Restart concurrent marking.
@@ -1258,7 +1256,7 @@ void G1ConcurrentMark::cleanup() {
   }

   // Install newly created mark bitMap as "prev".
-  swapMarkBitMaps();
+  swap_mark_bitmaps();

   g1h->reset_gc_time_stamp();

@@ -1585,7 +1583,7 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   _workers->run_task(&enq_task_proxy);
 }

-void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
+void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
     // overflown the global marking stack. Reference objects
@@ -1714,10 +1712,10 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   }
 }

-void G1ConcurrentMark::swapMarkBitMaps() {
-  G1CMBitMap* temp = _prevMarkBitMap;
-  _prevMarkBitMap = _nextMarkBitMap;
-  _nextMarkBitMap = temp;
+void G1ConcurrentMark::swap_mark_bitmaps() {
+  G1CMBitMap* temp = _prev_mark_bitmap;
+  _prev_mark_bitmap = _next_mark_bitmap;
+  _next_mark_bitmap = temp;
 }

 // Closure for marking entries in SATB buffers.
@@ -1817,7 +1815,7 @@ public:
   }
 };

-void G1ConcurrentMark::checkpointRootsFinalWork() {
+void G1ConcurrentMark::checkpoint_roots_final_work() {
   ResourceMark rm;
   HandleMark hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -1854,8 +1852,8 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
   print_stats();
 }

-void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
-  _prevMarkBitMap->clear_range(mr);
+void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
+  _prev_mark_bitmap->clear_range(mr);
 }

 HeapRegion*
@@ -1960,8 +1958,8 @@ void G1ConcurrentMark::verify_no_cset_oops() {
   }

   // Verify the task fingers
-  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
-  for (uint i = 0; i < parallel_marking_threads(); ++i) {
+  assert(_parallel_marking_threads <= _max_worker_id, "sanity");
+  for (uint i = 0; i < _parallel_marking_threads; ++i) {
     G1CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
     if (task_finger != NULL && task_finger < _heap_end) {
@@ -1976,15 +1974,15 @@ void G1ConcurrentMark::verify_no_cset_oops() {
 }
 #endif // PRODUCT
 void G1ConcurrentMark::create_live_data() {
-  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
+  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _next_mark_bitmap);
 }

 void G1ConcurrentMark::finalize_live_data() {
-  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
+  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap);
 }

 void G1ConcurrentMark::verify_live_data() {
-  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
+  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap);
 }

 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
@@ -2009,7 +2007,7 @@ void G1ConcurrentMark::print_stats() {
 }

 void G1ConcurrentMark::abort() {
-  if (!cmThread()->during_cycle() || _has_aborted) {
+  if (!cm_thread()->during_cycle() || _has_aborted) {
     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
     return;
   }
@@ -2018,7 +2016,7 @@ void G1ConcurrentMark::abort() {
   // concurrent bitmap clearing.
   {
     GCTraceTime(Debug, gc)("Clear Next Bitmap");
-    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
+    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
   }
   // Note we cannot clear the previous marking bitmap here
   // since VerifyDuringGC verifies the objects marked during
@@ -2084,7 +2082,7 @@ void G1ConcurrentMark::print_summary_info() {
   log.trace(" Total stop_world time = %8.2f s.",
             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
   log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
-            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
+            cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
 }

 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
@@ -2097,9 +2095,9 @@ void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {

 void G1ConcurrentMark::print_on_error(outputStream* st) const {
   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
-               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
-  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
-  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
+               p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
+  _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
+  _next_mark_bitmap->print_on_error(st, " Next Bits: ");
 }

 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
@@ -2177,9 +2175,9 @@ void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
   _cm_oop_closure = cm_oop_closure;
 }

-void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
-  guarantee(nextMarkBitMap != NULL, "invariant");
-  _nextMarkBitMap = nextMarkBitMap;
+void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
+  guarantee(next_mark_bitmap != NULL, "invariant");
+  _next_mark_bitmap = next_mark_bitmap;
   clear_region_fields();

   _calls = 0;
@@ -2221,7 +2219,9 @@ void G1CMTask::regular_clock_call() {
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!concurrent()) return;
+  if (!_concurrent) {
+    return;
+  }

   // (2) If marking has been aborted for Full GC, then we also abort.
   if (_cm->has_aborted()) {
@@ -2273,10 +2273,8 @@ void G1CMTask::decrease_limits() {
   // entries to/from the global stack). It basically tries to decrease the
   // scanning limit so that the clock is called earlier.

-  _words_scanned_limit = _real_words_scanned_limit -
-    3 * words_scanned_period / 4;
-  _refs_reached_limit = _real_refs_reached_limit -
-    3 * refs_reached_period / 4;
+  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
+  _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
 }

 void G1CMTask::move_entries_to_global_stack() {
@@ -2415,7 +2413,7 @@ void G1CMTask::drain_satb_buffers() {
   _draining_satb_buffers = false;

   assert(has_aborted() ||
-         concurrent() ||
+         _concurrent ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");

   // again, this was a potentially expensive operation, decrease the
@@ -2424,7 +2422,7 @@ void G1CMTask::drain_satb_buffers() {
 }

 void G1CMTask::print_stats() {
-  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
+  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
                        _worker_id, _calls);
   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                        _elapsed_time_ms, _termination_time_ms);
@@ -2558,21 +2556,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
                                bool do_termination,
                                bool is_serial) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
-  assert(concurrent() == _cm->concurrent(), "they should be the same");
-
-  G1Policy* g1_policy = _g1h->g1_policy();
-  assert(_task_queues != NULL, "invariant");
-  assert(_task_queue != NULL, "invariant");
-  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
-
-  assert(!_claimed,
-         "only one thread should claim this task at any one time");
-
-  // OK, this doesn't safeguard again all possible scenarios, as it is
-  // possible for two threads to set the _claimed flag at the same
-  // time. But it is only for debugging purposes anyway and it will
-  // catch most problems.
-  _claimed = true;
+  assert(_concurrent == _cm->concurrent(), "they should be the same");

   _start_time_ms = os::elapsedVTime() * 1000.0;

@@ -2657,7 +2641,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
         giveup_current_region();
         regular_clock_call();
       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
-        if (_nextMarkBitMap->is_marked(mr.start())) {
+        if (_next_mark_bitmap->is_marked(mr.start())) {
           // The object is marked - apply the closure
           bitmap_closure.do_addr(mr.start());
         }
@@ -2665,7 +2649,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
         // we can (and should) give up the current region.
         giveup_current_region();
         regular_clock_call();
-      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
+      } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
         giveup_current_region();
         regular_clock_call();
       } else {
@@ -2793,10 +2777,10 @@ void G1CMTask::do_marking_step(double time_target_ms,
       // We're all done.

       if (_worker_id == 0) {
-        // let's allow task 0 to do this
-        if (concurrent()) {
+        // Let's allow task 0 to do this
+        if (_concurrent) {
           assert(_cm->concurrent_marking_in_progress(), "invariant");
-          // we need to set this to false before the next
+          // We need to set this to false before the next
           // safepoint. This way we ensure that the marking phase
           // doesn't observe any more heap expansions.
           _cm->clear_concurrent_marking_in_progress();
@@ -2868,24 +2852,20 @@ void G1CMTask::do_marking_step(double time_target_ms,
       // ready to restart.
     }
   }
-
-  _claimed = false;
 }

 G1CMTask::G1CMTask(uint worker_id,
                    G1ConcurrentMark* cm,
-                   G1CMTaskQueue* task_queue,
-                   G1CMTaskQueueSet* task_queues)
+                   G1CMTaskQueue* task_queue)
   : _g1h(G1CollectedHeap::heap()),
-    _worker_id(worker_id), _cm(cm),
+    _worker_id(worker_id),
+    _cm(cm),
     _objArray_processor(this),
-    _claimed(false),
-    _nextMarkBitMap(NULL), _hash_seed(17),
+    _next_mark_bitmap(NULL),
+    _hash_seed(17),
     _task_queue(task_queue),
-    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
   guarantee(task_queue != NULL, "invariant");
-  guarantee(task_queues != NULL, "invariant");

   _marking_step_diffs_ms.add(0.5);
 }
@ -25,18 +25,18 @@
|
||||
#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
|
||||
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
|
||||
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
|
||||
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "gc/shared/taskqueue.hpp"
|
||||
|
||||
class ConcurrentGCTimer;
|
||||
class ConcurrentMarkThread;
|
||||
class G1CollectedHeap;
|
||||
class G1CMTask;
|
||||
class G1ConcurrentMark;
|
||||
class ConcurrentGCTimer;
|
||||
class G1OldTracer;
|
||||
class G1RegionToSpaceMapper;
|
||||
class G1SurvivorRegions;
|
||||
|
||||
#ifdef _MSC_VER
|
||||
@ -272,12 +272,8 @@ public:
|
||||
bool wait_until_scan_finished();
|
||||
};
|
||||
|
||||
class ConcurrentMarkThread;
|
||||
|
||||
class G1ConcurrentMark: public CHeapObj<mtGC> {
|
||||
friend class ConcurrentMarkThread;
|
||||
friend class G1ParNoteEndTask;
|
||||
friend class G1VerifyLiveDataClosure;
|
||||
friend class G1CMRefProcTaskProxy;
|
||||
friend class G1CMRefProcTaskExecutor;
|
||||
friend class G1CMKeepAliveAndDrainClosure;
|
||||
@ -287,46 +283,43 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
|
||||
friend class G1CMRemarkTask;
|
||||
friend class G1CMTask;
|
||||
|
||||
protected:
|
||||
ConcurrentMarkThread* _cmThread; // The thread doing the work
|
||||
G1CollectedHeap* _g1h; // The heap
|
||||
uint _parallel_marking_threads; // The number of marking
|
||||
// threads we're using
|
||||
uint _max_parallel_marking_threads; // Max number of marking
|
||||
// threads we'll ever use
|
||||
double _sleep_factor; // How much we have to sleep, with
|
||||
// respect to the work we just did, to
|
||||
// meet the marking overhead goal
|
||||
double _marking_task_overhead; // Marking target overhead for
|
||||
// a single task
|
||||
ConcurrentMarkThread* _cm_thread; // The thread doing the work
|
||||
G1CollectedHeap* _g1h; // The heap
|
||||
uint _parallel_marking_threads; // The number of marking
|
||||
// threads we're using
|
||||
uint _max_parallel_marking_threads; // Max number of marking
|
||||
// threads we'll ever use
|
||||
double _sleep_factor; // How much we have to sleep, with
|
||||
// respect to the work we just did, to
|
||||
// meet the marking overhead goal
|
||||
bool _completed_initialization; // Set to true when initialization is complete
|
||||
|
||||
FreeRegionList _cleanup_list;
|
||||
FreeRegionList _cleanup_list;
|
||||
|
||||
// Concurrent marking support structures
|
||||
G1CMBitMap _markBitMap1;
|
||||
G1CMBitMap _markBitMap2;
|
||||
G1CMBitMap* _prevMarkBitMap; // Completed mark bitmap
|
||||
G1CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap
|
||||
G1CMBitMap _mark_bitmap_1;
|
||||
G1CMBitMap _mark_bitmap_2;
|
||||
G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
|
||||
G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
|
||||
|
||||
// Heap bounds
|
||||
HeapWord* _heap_start;
|
||||
HeapWord* _heap_end;
|
||||
HeapWord* _heap_start;
|
||||
HeapWord* _heap_end;
|
||||
|
||||
// Root region tracking and claiming
|
||||
G1CMRootRegions _root_regions;
|
||||
G1CMRootRegions _root_regions;
|
||||
|
||||
// For gray objects
|
||||
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
|
||||
HeapWord* volatile _finger; // The global finger, region aligned,
|
||||
// always points to the end of the
|
||||
// last claimed region
|
||||
// For grey objects
|
||||
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
|
||||
HeapWord* volatile _finger; // The global finger, region aligned,
|
||||
// always pointing to the end of the
|
||||
// last claimed region
|
||||
|
||||
// Marking tasks
|
||||
uint _max_worker_id;// Maximum worker id
|
||||
uint _active_tasks; // Task num currently active
|
||||
G1CMTask** _tasks; // Task queue array (max_worker_id len)
|
||||
G1CMTaskQueueSet* _task_queues; // Task queue set
|
||||
ParallelTaskTerminator _terminator; // For termination
|
||||
uint _max_worker_id;// Maximum worker id
|
||||
uint _active_tasks; // Number of tasks currently active
|
||||
G1CMTask** _tasks; // Task queue array (max_worker_id length)
|
||||
G1CMTaskQueueSet* _task_queues; // Task queue set
|
||||
ParallelTaskTerminator _terminator; // For termination
|
||||
|
||||
// Two sync barriers that are used to synchronize tasks when an
|
||||
// overflow occurs. The algorithm is the following. All tasks enter
|
||||
@ -337,32 +330,32 @@ protected:
|
||||
// ensure, that no task starts doing work before all data
|
||||
// structures (local and global) have been re-initialized. When they
|
||||
// exit it, they are free to start working again.
|
||||
WorkGangBarrierSync _first_overflow_barrier_sync;
|
||||
WorkGangBarrierSync _second_overflow_barrier_sync;
|
||||
WorkGangBarrierSync _first_overflow_barrier_sync;
|
||||
WorkGangBarrierSync _second_overflow_barrier_sync;
|
||||
|
||||
// This is set by any task, when an overflow on the global data
|
||||
// structures is detected
|
||||
volatile bool _has_overflown;
|
||||
volatile bool _has_overflown;
|
||||
// True: marking is concurrent, false: we're in remark
|
||||
volatile bool _concurrent;
|
||||
volatile bool _concurrent;
|
||||
// Set at the end of a Full GC so that marking aborts
|
||||
volatile bool _has_aborted;
|
||||
volatile bool _has_aborted;
|
||||
|
||||
// Used when remark aborts due to an overflow to indicate that
|
||||
// another concurrent marking phase should start
|
||||
volatile bool _restart_for_overflow;
|
||||
volatile bool _restart_for_overflow;
|
||||
|
||||
// This is true from the very start of concurrent marking until the
|
||||
// point when all the tasks complete their work. It is really used
|
||||
// to determine the points between the end of concurrent marking and
|
||||
// time of remark.
|
||||
volatile bool _concurrent_marking_in_progress;
|
||||
volatile bool _concurrent_marking_in_progress;
|
||||
|
||||
ConcurrentGCTimer* _gc_timer_cm;
|
||||
ConcurrentGCTimer* _gc_timer_cm;
|
||||
|
||||
G1OldTracer* _gc_tracer_cm;
|
||||
G1OldTracer* _gc_tracer_cm;
|
||||
|
||||
// All of these times are in ms
|
||||
// Timing statistics. All of them are in ms
|
||||
NumberSeq _init_times;
|
||||
NumberSeq _remark_times;
|
||||
NumberSeq _remark_mark_times;
|
||||
@ -375,12 +368,12 @@ protected:
|
||||
|
||||
WorkGang* _parallel_workers;
|
||||
|
||||
void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
|
||||
void weakRefsWork(bool clear_all_soft_refs);
|
||||
void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
|
||||
void weak_refs_work(bool clear_all_soft_refs);
|
||||
|
||||
void swapMarkBitMaps();
|
||||
void swap_mark_bitmaps();
|
||||
|
||||
// It resets the global marking data structures, as well as the
|
||||
// Resets the global marking data structures, as well as the
|
||||
// task local ones; should be called during initial mark.
|
||||
void reset();
|
||||
|
||||
@ -395,7 +388,7 @@ protected:
|
||||
// Called to indicate how many threads are currently active.
|
||||
void set_concurrency(uint active_tasks);
|
||||
|
||||
// It should be called to indicate which phase we're in (concurrent
|
||||
// Should be called to indicate which phase we're in (concurrent
|
||||
// mark or remark) and how many threads are currently active.
|
||||
void set_concurrency_and_phase(uint active_tasks, bool concurrent);
|
||||
|
||||
@ -407,17 +400,14 @@ protected:
|
||||
}
|
||||
|
||||
// Accessor methods
|
||||
uint parallel_marking_threads() const { return _parallel_marking_threads; }
|
||||
uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
|
||||
double sleep_factor() { return _sleep_factor; }
|
||||
double marking_task_overhead() { return _marking_task_overhead;}
|
||||
|
||||
HeapWord* finger() { return _finger; }
|
||||
bool concurrent() { return _concurrent; }
|
||||
uint active_tasks() { return _active_tasks; }
|
||||
ParallelTaskTerminator* terminator() { return &_terminator; }
|
||||
|
||||
// It claims the next available region to be scanned by a marking
|
||||
// Claims the next available region to be scanned by a marking
|
||||
// task/thread. It might return NULL if the next region is empty or
|
||||
// we have run out of regions. In the latter case, out_of_regions()
|
||||
// determines whether we've really run out of regions or the task
|
||||
@ -433,30 +423,19 @@ protected:
|
||||
// frequently.
|
||||
HeapRegion* claim_region(uint worker_id);
|
||||
|
||||
// It determines whether we've run out of regions to scan. Note that
|
||||
// Determines whether we've run out of regions to scan. Note that
|
||||
// the finger can point past the heap end in case the heap was expanded
|
||||
// to satisfy an allocation without doing a GC. This is fine, because all
|
||||
// objects in those regions will be considered live anyway because of
|
||||
// SATB guarantees (i.e. their TAMS will be equal to bottom).
|
||||
bool out_of_regions() { return _finger >= _heap_end; }
|
||||
bool out_of_regions() { return _finger >= _heap_end; }
|
||||
|
||||
// Returns the task with the given id
|
||||
G1CMTask* task(int id) {
|
||||
assert(0 <= id && id < (int) _active_tasks,
|
||||
"task id not within active bounds");
|
||||
assert(0 <= id && id < (int) _active_tasks, "Task id %d not within active bounds up to %u", id, _active_tasks);
|
||||
return _tasks[id];
|
||||
}
|
||||
|
||||
// Returns the task queue with the given id
|
||||
G1CMTaskQueue* task_queue(int id) {
|
||||
assert(0 <= id && id < (int) _active_tasks,
|
||||
"task queue id not within active bounds");
|
||||
return (G1CMTaskQueue*) _task_queues->queue(id);
|
||||
}
|
||||
|
||||
// Returns the task queue set
|
||||
G1CMTaskQueueSet* task_queues() { return _task_queues; }
|
||||
|
||||
// Access / manipulation of the overflow flag which is set to
|
||||
// indicate that the global stack has overflown
|
||||
bool has_overflown() { return _has_overflown; }
|
||||
@ -468,16 +447,6 @@ protected:
|
||||
void enter_first_sync_barrier(uint worker_id);
|
||||
void enter_second_sync_barrier(uint worker_id);
|
||||
|
||||
// Card index of the bottom of the G1 heap. Used for biasing indices into
|
||||
// the card bitmaps.
|
||||
intptr_t _heap_bottom_card_num;
|
||||
|
||||
// Set to true when initialization is complete
|
||||
bool _completed_initialization;
|
||||
|
||||
// end_timer, true to end gc timer after ending concurrent phase.
|
||||
void register_concurrent_phase_end_common(bool end_timer);
|
||||
|
||||
// Clear the given bitmap in parallel using the given WorkGang. If may_yield is
|
||||
// true, periodically insert checks to see if this method should exit prematurely.
|
||||
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
|
||||
@ -495,13 +464,13 @@ public:
|
||||
bool mark_stack_pop(G1TaskQueueEntry* arr) {
|
||||
return _global_mark_stack.par_pop_chunk(arr);
|
||||
}
|
||||
size_t mark_stack_size() { return _global_mark_stack.size(); }
|
||||
size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
|
||||
bool mark_stack_empty() { return _global_mark_stack.is_empty(); }
|
||||
size_t mark_stack_size() const { return _global_mark_stack.size(); }
|
||||
size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
|
||||
bool mark_stack_empty() const { return _global_mark_stack.is_empty(); }
|
||||
|
||||
G1CMRootRegions* root_regions() { return &_root_regions; }
|
||||
|
||||
bool concurrent_marking_in_progress() {
|
||||
bool concurrent_marking_in_progress() const {
|
||||
return _concurrent_marking_in_progress;
|
||||
}
|
||||
void set_concurrent_marking_in_progress() {
|
||||
@ -533,10 +502,10 @@ public:
|
||||
G1RegionToSpaceMapper* next_bitmap_storage);
|
||||
~G1ConcurrentMark();
|
||||
|
||||
ConcurrentMarkThread* cmThread() { return _cmThread; }
|
||||
ConcurrentMarkThread* cm_thread() { return _cm_thread; }
|
||||
|
||||
const G1CMBitMap* const prevMarkBitMap() const { return _prevMarkBitMap; }
|
||||
G1CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
|
||||
const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
|
||||
G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
|
||||
|
||||
// Returns the number of GC threads to be used in a concurrent
|
||||
// phase based on the number of GC threads being used in a STW
|
||||
@ -556,48 +525,49 @@ public:
|
||||
|
||||
// Return whether the next mark bitmap has no marks set. To be used for assertions
|
||||
// only. Will not yield to pause requests.
|
||||
bool nextMarkBitmapIsClear();
|
||||
bool next_mark_bitmap_is_clear();
|
||||
|
||||
// These two do the work that needs to be done before and after the
|
||||
// initial root checkpoint. Since this checkpoint can be done at two
|
||||
// different points (i.e. an explicit pause or piggy-backed on a
|
||||
// young collection), then it's nice to be able to easily share the
|
||||
// pre/post code. It might be the case that we can put everything in
|
||||
// the post method. TP
|
||||
void checkpointRootsInitialPre();
|
||||
void checkpointRootsInitialPost();
|
||||
// the post method.
|
||||
void checkpoint_roots_initial_pre();
|
||||
void checkpoint_roots_initial_post();
|
||||
|
||||
// Scan all the root regions and mark everything reachable from
|
||||
// them.
|
||||
void scan_root_regions();
|
||||
|
||||
// Scan a single root region and mark everything reachable from it.
|
||||
void scanRootRegion(HeapRegion* hr);
|
||||
void scan_root_region(HeapRegion* hr);
|
||||
|
||||
// Do concurrent phase of marking, to a tentative transitive closure.
|
||||
void mark_from_roots();
|
||||
|
||||
void checkpointRootsFinal(bool clear_all_soft_refs);
|
||||
void checkpointRootsFinalWork();
|
||||
void checkpoint_roots_final(bool clear_all_soft_refs);
|
||||
void checkpoint_roots_final_work();
|
||||
|
||||
void cleanup();
|
||||
void complete_cleanup();
|
||||
|
||||
// Mark in the previous bitmap. NB: this is usually read-only, so use
|
||||
// this carefully!
|
||||
inline void markPrev(oop p);
|
||||
// Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
|
||||
// this carefully.
|
||||
inline void mark_in_prev_bitmap(oop p);
|
||||
|
||||
// Clears marks for all objects in the given range, for the prev or
|
||||
// next bitmaps. NB: the previous bitmap is usually
|
||||
// next bitmaps. Caution: the previous bitmap is usually
|
||||
// read-only, so use this carefully!
|
||||
void clearRangePrevBitmap(MemRegion mr);
|
||||
void clear_range_in_prev_bitmap(MemRegion mr);
|
||||
|
||||
inline bool is_marked_in_prev_bitmap(oop p) const;
|
||||
|
||||
// Verify that there are no CSet oops on the stacks (taskqueues /
|
||||
// global mark stack) and fingers (global / per-task).
|
||||
// If marking is not in progress, it's a no-op.
|
||||
void verify_no_cset_oops() PRODUCT_RETURN;
|
||||
|
||||
inline bool isPrevMarked(oop p) const;
|
||||
|
||||
inline bool do_yield_check();
|
||||
|
||||
// Abandon current marking iteration due to a Full GC.
|
||||
@ -661,78 +631,71 @@ private:
|
||||
uint _worker_id;
|
||||
G1CollectedHeap* _g1h;
|
||||
G1ConcurrentMark* _cm;
|
||||
G1CMBitMap* _nextMarkBitMap;
|
||||
G1CMBitMap* _next_mark_bitmap;
|
||||
// the task queue of this task
|
||||
G1CMTaskQueue* _task_queue;
|
||||
private:
|
||||
// the task queue set---needed for stealing
|
||||
G1CMTaskQueueSet* _task_queues;
|
||||
// indicates whether the task has been claimed---this is only for
|
||||
// debugging purposes
|
||||
bool _claimed;
|
||||
|
||||
// number of calls to this task
|
||||
int _calls;
|
||||
// Number of calls to this task
|
||||
uint _calls;
|
||||
|
||||
// when the virtual timer reaches this time, the marking step should
|
||||
// exit
|
||||
// When the virtual timer reaches this time, the marking step should exit
|
||||
double _time_target_ms;
|
||||
// the start time of the current marking step
|
||||
// Start time of the current marking step
|
||||
double _start_time_ms;
|
||||
|
||||
// the oop closure used for iterations over oops
|
||||
// Oop closure used for iterations over oops
|
||||
G1CMOopClosure* _cm_oop_closure;
|
||||
|
||||
// the region this task is scanning, NULL if we're not scanning any
|
||||
// Region this task is scanning, NULL if we're not scanning any
|
||||
HeapRegion* _curr_region;
|
||||
// the local finger of this task, NULL if we're not scanning a region
|
||||
// Local finger of this task, NULL if we're not scanning a region
|
||||
HeapWord* _finger;
|
||||
// limit of the region this task is scanning, NULL if we're not scanning one
|
||||
// Limit of the region this task is scanning, NULL if we're not scanning one
|
||||
HeapWord* _region_limit;
|
||||
|
||||
// the number of words this task has scanned
|
||||
// Number of words this task has scanned
|
||||
size_t _words_scanned;
|
||||
// When _words_scanned reaches this limit, the regular clock is
|
||||
// called. Notice that this might be decreased under certain
|
||||
// circumstances (i.e. when we believe that we did an expensive
|
||||
// operation).
|
||||
size_t _words_scanned_limit;
|
||||
// the initial value of _words_scanned_limit (i.e. what it was
|
||||
// Initial value of _words_scanned_limit (i.e. what it was
|
||||
// before it was decreased).
|
||||
size_t _real_words_scanned_limit;
|
||||
|
||||
// the number of references this task has visited
|
||||
// Number of references this task has visited
|
||||
size_t _refs_reached;
|
||||
// When _refs_reached reaches this limit, the regular clock is
|
||||
// called. Notice this this might be decreased under certain
|
||||
// circumstances (i.e. when we believe that we did an expensive
|
||||
// operation).
|
||||
size_t _refs_reached_limit;
|
||||
// the initial value of _refs_reached_limit (i.e. what it was before
|
||||
// Initial value of _refs_reached_limit (i.e. what it was before
|
||||
// it was decreased).
|
||||
size_t _real_refs_reached_limit;
|
||||
|
||||
// used by the work stealing stuff
|
||||
// Used by the work stealing
|
||||
int _hash_seed;
|
||||
// if this is true, then the task has aborted for some reason
|
||||
// If true, then the task has aborted for some reason
|
||||
bool _has_aborted;
|
||||
// set when the task aborts because it has met its time quota
|
||||
// Set when the task aborts because it has met its time quota
|
||||
bool _has_timed_out;
|
||||
// true when we're draining SATB buffers; this avoids the task
|
||||
// True when we're draining SATB buffers; this avoids the task
|
||||
// aborting due to SATB buffers being available (as we're already
|
||||
// dealing with them)
|
||||
bool _draining_satb_buffers;
|
||||
|
||||
// number sequence of past step times
|
||||
// Number sequence of past step times
|
||||
NumberSeq _step_times_ms;
|
||||
// elapsed time of this task
|
||||
// Elapsed time of this task
|
||||
double _elapsed_time_ms;
|
||||
// termination time of this task
|
||||
// Termination time of this task
|
||||
double _termination_time_ms;
|
||||
// when this task got into the termination protocol
|
||||
// When this task got into the termination protocol
|
||||
double _termination_start_time_ms;
|
||||
|
||||
// true when the task is during a concurrent phase, false when it is
|
||||
// True when the task is during a concurrent phase, false when it is
|
||||
// in the remark phase (so, in the latter case, we do not have to
|
||||
// check all the things that we have to check during the concurrent
|
||||
// phase, i.e. SATB buffer availability...)
|
||||
@ -740,21 +703,21 @@ private:
|
||||
|
||||
TruncatedSeq _marking_step_diffs_ms;
|
||||
|
||||
// it updates the local fields after this task has claimed
|
||||
// Updates the local fields after this task has claimed
|
||||
// a new region to scan
|
||||
void setup_for_region(HeapRegion* hr);
|
||||
// it brings up-to-date the limit of the region
|
||||
// Makes the limit of the region up-to-date
|
||||
void update_region_limit();
|
||||
|
||||
// called when either the words scanned or the refs visited limit
|
||||
// Called when either the words scanned or the refs visited limit
|
||||
// has been reached
|
||||
void reached_limit();
|
||||
// recalculates the words scanned and refs visited limits
|
||||
// Recalculates the words scanned and refs visited limits
|
||||
void recalculate_limits();
|
||||
// decreases the words scanned and refs visited limits when we reach
|
||||
// Decreases the words scanned and refs visited limits when we reach
|
||||
// an expensive operation
|
||||
void decrease_limits();
|
||||
// it checks whether the words scanned or refs visited reached their
|
||||
// Checks whether the words scanned or refs visited reached their
|
||||
// respective limit and calls reached_limit() if they have
|
||||
void check_limits() {
|
||||
if (_words_scanned >= _words_scanned_limit ||
|
||||
@ -762,11 +725,10 @@ private:
|
||||
reached_limit();
|
||||
}
|
||||
}
|
||||
// this is supposed to be called regularly during a marking step as
|
||||
// Supposed to be called regularly during a marking step as
|
||||
// it checks a bunch of conditions that might cause the marking step
|
||||
// to abort
|
||||
void regular_clock_call();
|
||||
bool concurrent() { return _concurrent; }
|
||||
|
||||
// Test whether obj might have already been passed over by the
|
||||
// mark bitmap scan, and so needs to be pushed onto the mark stack.
|
||||
@ -777,10 +739,9 @@ public:
|
||||
// Apply the closure on the given area of the objArray. Return the number of words
|
||||
// scanned.
|
||||
inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
|
||||
// It resets the task; it should be called right at the beginning of
|
||||
// a marking phase.
|
||||
void reset(G1CMBitMap* _nextMarkBitMap);
|
||||
// it clears all the fields that correspond to a claimed region.
|
||||
// Resets the task; should be called right at the beginning of a marking phase.
|
||||
void reset(G1CMBitMap* next_mark_bitmap);
|
||||
// Clears all the fields that correspond to a claimed region.
|
||||
void clear_region_fields();
|
||||
|
||||
void set_concurrent(bool concurrent) { _concurrent = concurrent; }
|
||||
@ -801,7 +762,7 @@ public:
|
||||
_elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
|
||||
}
|
||||
|
||||
// returns the worker ID associated with this task.
|
||||
// Returns the worker ID associated with this task.
|
||||
uint worker_id() { return _worker_id; }
|
||||
|
||||
// From TerminatorTerminator. It determines whether this task should
|
||||
@ -818,8 +779,6 @@ public:
|
||||
bool has_aborted() { return _has_aborted; }
|
||||
void set_has_aborted() { _has_aborted = true; }
|
||||
void clear_has_aborted() { _has_aborted = false; }
|
||||
bool has_timed_out() { return _has_timed_out; }
|
||||
bool claimed() { return _claimed; }
|
||||
|
||||
void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
|
||||
|
||||
@ -836,10 +795,10 @@ public:
|
||||
// Precondition: obj is a valid heap object.
|
||||
inline void deal_with_reference(oop obj);
|
||||
|
||||
// It scans an object and visits its children.
|
||||
// Scans an object and visits its children.
|
||||
inline void scan_task_entry(G1TaskQueueEntry task_entry);
|
||||
|
||||
// It pushes an object on the local queue.
|
||||
// Pushes an object on the local queue.
|
||||
inline void push(G1TaskQueueEntry task_entry);
|
||||
|
||||
// Move entries to the global stack.
|
||||
@ -847,20 +806,20 @@ public:
|
||||
// Move entries from the global stack, return true if we were successful to do so.
|
||||
bool get_entries_from_global_stack();
|
||||
|
||||
// It pops and scans objects from the local queue. If partially is
|
||||
// Pops and scans objects from the local queue. If partially is
|
||||
// true, then it stops when the queue size is of a given limit. If
|
||||
// partially is false, then it stops when the queue is empty.
|
||||
void drain_local_queue(bool partially);
|
||||
// It moves entries from the global stack to the local queue and
|
||||
// Moves entries from the global stack to the local queue and
|
||||
// drains the local queue. If partially is true, then it stops when
|
||||
// both the global stack and the local queue reach a given size. If
|
||||
// partially if false, it tries to empty them totally.
|
||||
void drain_global_stack(bool partially);
|
||||
// It keeps picking SATB buffers and processing them until no SATB
|
||||
// Keeps picking SATB buffers and processing them until no SATB
|
||||
// buffers are available.
|
||||
void drain_satb_buffers();
|
||||
|
||||
// moves the local finger to a new location
|
||||
// Moves the local finger to a new location
|
||||
inline void move_finger_to(HeapWord* new_finger) {
|
||||
assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
|
||||
_finger = new_finger;
|
||||

@ -868,10 +827,9 @@ public:

G1CMTask(uint worker_id,
G1ConcurrentMark *cm,
G1CMTaskQueue* task_queue,
G1CMTaskQueueSet* task_queues);
G1CMTaskQueue* task_queue);

// it prints statistics associated with this task
// Prints statistics associated with this task
void print_stats();
};
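
The drain methods above are the building blocks of a task's marking step. As a rough sketch of how they fit together (not JDK code; the real driver is G1CMTask::do_marking_step, which also handles time budgets, the termination protocol, and aborts), a step could look like this:

void marking_step_sketch(G1CMTask* task) {
  // Process pending SATB buffers so references overwritten by concurrent
  // mutators are still traced.
  task->drain_satb_buffers();
  // While scanning, partial drains keep the local queue and the global
  // stack bounded without emptying them completely.
  task->drain_local_queue(true /* partially */);
  task->drain_global_stack(true /* partially */);
  // Once scanning is done, empty both completely.
  task->drain_local_queue(false /* partially */);
  task->drain_global_stack(false /* partially */);
}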

@ -51,12 +51,8 @@ inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop cons
assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));

HeapWord* const obj_addr = (HeapWord*)obj;
// Dirty read to avoid CAS.
if (_nextMarkBitMap->is_marked(obj_addr)) {
return false;
}

return _nextMarkBitMap->par_mark(obj_addr);
return _next_mark_bitmap->par_mark(obj_addr);
}

#ifndef PRODUCT
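
The "dirty read" in this hunk is a common lock-free idiom: filter already-marked objects with a cheap plain load so only the first thread to mark a word pays for the atomic update. A toy illustration of the pattern (not the JDK's G1CMBitMap; the JDK uses a cmpxchg loop where this sketch uses fetch_or):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct TinyBitmap {
  std::atomic<uint64_t> words[1024] = {};

  bool is_marked(size_t bit) const {
    // Cheap dirty read; no ordering needed for a pure filter.
    return (words[bit / 64].load(std::memory_order_relaxed) >> (bit % 64)) & 1;
  }

  // Returns true only for the thread that actually set the bit, mirroring
  // the par_mark() contract relied on by mark_in_next_bitmap().
  bool par_mark(size_t bit) {
    const uint64_t mask = uint64_t(1) << (bit % 64);
    const uint64_t old = words[bit / 64].fetch_or(mask, std::memory_order_relaxed);
    return (old & mask) == 0;
  }
};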

@ -90,7 +86,7 @@ inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
_g1h->heap_region_containing(task_entry.obj())), "invariant");
assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant"); // FIXME!!!
assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()), "invariant");
assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()), "invariant");

if (!_task_queue->push(task_entry)) {
// The local task queue looks full. We need to push some entries
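
The hunk above ends at the overflow path: when the bounded local queue rejects a push, the task spills a batch of entries to the shared global stack and retries. A self-contained sketch of that discipline, with illustrative stand-in types (the real global mark stack is synchronized and chunked):

#include <cstddef>
#include <deque>
#include <vector>

struct LocalQueue {
  static const size_t kCapacity = 16;
  std::deque<int> entries;

  bool push(int e) {
    if (entries.size() >= kCapacity) {
      return false; // Queue looks full.
    }
    entries.push_back(e);
    return true;
  }
};

void push_with_overflow(LocalQueue& local, std::vector<int>& global_stack, int entry) {
  while (!local.push(entry)) {
    // Spill half the local entries to the global stack to make room.
    size_t batch = local.entries.size() / 2;
    for (size_t i = 0; i < batch; i++) {
      global_stack.push_back(local.entries.front());
      local.entries.pop_front();
    }
  }
}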

@ -138,7 +134,7 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()),
assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()),
"Any stolen object should be a slice or marked");

if (scan) {

@ -211,14 +207,14 @@ inline void G1CMTask::deal_with_reference(oop obj) {
make_reference_grey(obj);
}

inline void G1ConcurrentMark::markPrev(oop p) {
assert(!_prevMarkBitMap->is_marked((HeapWord*) p), "sanity");
_prevMarkBitMap->mark((HeapWord*) p);
inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
assert(!_prev_mark_bitmap->is_marked((HeapWord*) p), "sanity");
_prev_mark_bitmap->mark((HeapWord*) p);
}

bool G1ConcurrentMark::isPrevMarked(oop p) const {
bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
return _prevMarkBitMap->is_marked((HeapWord*)p);
return _prev_mark_bitmap->is_marked((HeapWord*)p);
}

inline bool G1ConcurrentMark::do_yield_check() {
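
For context on why the prev bitmap helpers matter: G1 treats an object below a region's previous top-at-mark-start (PTAMS) as live only if the previous marking left it marked, while anything allocated at or above PTAMS is implicitly live. A simplified sketch of that rule, reusing the document's types (an illustration, not the JDK's HeapRegion::is_obj_dead):

bool is_live_sketch(HeapWord* obj_addr, HeapWord* ptams, const G1CMBitMap* prev_bitmap) {
  if (obj_addr >= ptams) {
    return true; // Allocated since the last completed marking; implicitly live.
  }
  return prev_bitmap->is_marked(obj_addr);
}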

@ -538,7 +538,7 @@ CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {
}

bool G1DefaultPolicy::about_to_start_mixed_phase() const {
return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {

@ -931,7 +931,7 @@ bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_caus
// We actually check whether we are marking here and not if we are in a
// reclamation phase. This means that we will schedule a concurrent mark
// even while we are still in the process of reclaiming memory.
bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
if (!during_cycle) {
log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
collector_state()->set_initiate_conc_mark_if_possible(true);

@ -110,8 +110,8 @@ public:
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
if (!_cm->isPrevMarked(obj)) {
_cm->markPrev(obj);
if (!_cm->is_marked_in_prev_bitmap(obj)) {
_cm->mark_in_prev_bitmap(obj);
}
if (_during_initial_mark) {
// For the next marking info we'll only mark the

@ -181,7 +181,7 @@ public:
#endif
}
}
_cm->clearRangePrevBitmap(mr);
_cm->clear_range_in_prev_bitmap(mr);
}

void zap_remainder() {

@ -647,8 +647,8 @@ bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1C
}

bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->nextMarkBitMap();
const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();

HeapWord* ptams = hr->prev_top_at_mark_start();
HeapWord* ntams = hr->next_top_at_mark_start();

@ -177,7 +177,7 @@ inline size_t HeapRegion::block_size(const HeapWord *addr) const {
return oop(addr)->size();
}

return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
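
block_size_using_bitmap() is the slow path taken when the object at addr is dead and therefore unparsable. A hypothetical sketch of the idea (not the JDK implementation; the get_next_marked_addr helper and its signature are assumptions for illustration): the block covering dead space extends to the next address the previous marking left marked.

size_t block_size_using_bitmap_sketch(const HeapWord* addr,
                                      const G1CMBitMap* prev_bitmap,
                                      HeapWord* region_end) {
  // Dead space cannot be walked object by object, so treat the whole gap
  // up to the next live (marked) word as a single block.
  HeapWord* next_live = prev_bitmap->get_next_marked_addr(addr, region_end);
  return pointer_delta(next_live, addr);
}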

@ -334,7 +334,7 @@ bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
}
#endif

const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prevMarkBitMap();
const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
do {
oop obj = oop(cur);
assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));

@ -448,7 +448,7 @@ WB_END
WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
return g1h->concurrent_mark()->cmThread()->during_cycle();
return g1h->concurrent_mark()->cm_thread()->during_cycle();
}
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1InConcurrentMark: G1 GC is not enabled");
WB_END

@ -456,7 +456,7 @@ WB_END
WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
if (!g1h->concurrent_mark()->cm_thread()->during_cycle()) {
g1h->collect(GCCause::_wb_conc_mark);
return true;
}