8080113: Remove CollectedHeap::set_par_threads()
Reviewed-by: jmasa, kbarrett
commit 5dc3521a80
parent 8d0f1a6528
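The commit removes the stateful handshake in which each parallel GC phase set the heap-wide _n_par_threads count before running a task and reset it to 0 afterwards; the worker count now travels with the work itself, through StrongRootsScope and the task constructors. A minimal standalone sketch of the two shapes (invented names, not HotSpot code):

// Standalone sketch, not HotSpot code: the before/after shape of a
// parallel GC phase, with invented names.
#include <cstdio>

// Before: parallelism announced through mutable heap-wide state.
static unsigned g_par_threads = 0;            // stand-in for _n_par_threads

static void run_phase_old(unsigned n_workers) {
  g_par_threads = n_workers;                  // set_par_threads(n_workers)
  std::printf("old: helpers read %u from the global\n", g_par_threads);
  g_par_threads = 0;                          // set_par_threads(0) -- easy to forget
}

// After: the count travels with the work; nothing to reset on exit.
struct ScopeSketch {                          // stand-in for StrongRootsScope
  unsigned n_threads;
  explicit ScopeSketch(unsigned n) : n_threads(n) {}
};

static void run_phase_new(unsigned n_workers) {
  ScopeSketch srs(n_workers);                 // scope owns the thread count
  std::printf("new: the task is handed %u directly\n", srs.n_threads);
}

int main() {
  run_phase_old(4);
  run_phase_new(4);
}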
@@ -3017,14 +3017,12 @@ void CMSCollector::checkpointRootsInitialWork() {
       StrongRootsScope srs(n_workers);

       CMSParInitialMarkTask tsk(this, &srs, n_workers);
-      gch->set_par_threads(n_workers);
       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
       if (n_workers > 1) {
         workers->run_task(&tsk);
       } else {
         tsk.work(0);
       }
-      gch->set_par_threads(0);
     } else {
       // The serial version.
       CLDToOopClosure cld_closure(&notOlder, true);
@@ -5088,8 +5086,6 @@ void CMSCollector::do_remark_parallel() {

   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);

-  // Set up for parallel process_roots work.
-  gch->set_par_threads(n_workers);
   // We won't be iterating over the cards in the card table updating
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
@@ -5127,7 +5123,6 @@ void CMSCollector::do_remark_parallel() {
     tsk.work(0);
   }

-  gch->set_par_threads(0);  // 0 ==> non-parallel.
   // restore, single-threaded for now, any preserved marks
   // as a result of work_q overflow
   restore_preserved_marks_if_any();
@@ -836,7 +836,6 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()
 {
   _state_set.flush();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->set_par_threads(0);  // 0 ==> non-parallel.
   gch->save_marks();
 }

@@ -954,7 +953,6 @@ void ParNewGeneration::collect(bool full,
     StrongRootsScope srs(n_workers);

     ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
-    gch->set_par_threads(n_workers);
     gch->rem_set()->prepare_for_younger_refs_iterate(true);
     // It turns out that even when we're using 1 thread, doing the work in a
     // separate thread causes wide variance in run times. We can't help this
@@ -996,7 +994,6 @@ void ParNewGeneration::collect(bool full,
                                               _gc_timer, _gc_tracer.gc_id());
   } else {
     thread_state_set.flush();
-    gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
@@ -1941,11 +1941,7 @@ void ConcurrentMark::cleanup() {
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

-  g1h->set_par_threads();
-  uint n_workers = _g1h->workers()->active_workers();
   g1h->workers()->run_task(&g1_par_count_task);
-  // Done with the parallel phase so reset to 0.
-  g1h->set_par_threads(0);

   if (VerifyDuringGC) {
     // Verify that the counting data accumulated during marking matches
@@ -1961,10 +1957,7 @@ void ConcurrentMark::cleanup() {
                                                  &expected_region_bm,
                                                  &expected_card_bm);

-    g1h->set_par_threads((int)n_workers);
     g1h->workers()->run_task(&g1_par_verify_task);
-    // Done with the parallel phase so reset to 0.
-    g1h->set_par_threads(0);

     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
   }
@@ -1986,11 +1979,11 @@ void ConcurrentMark::cleanup() {

   g1h->reset_gc_time_stamp();

+  uint n_workers = _g1h->workers()->active_workers();
+
   // Note end of marking in all heap regions.
   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
-  g1h->set_par_threads((int)n_workers);
   g1h->workers()->run_task(&g1_par_note_end_task);
-  g1h->set_par_threads(0);
   g1h->check_gc_time_stamps();

   if (!cleanup_list_is_empty()) {
@@ -2005,9 +1998,7 @@ void ConcurrentMark::cleanup() {
   if (G1ScrubRemSets) {
     double rs_scrub_start = os::elapsedTime();
     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
-    g1h->set_par_threads((int)n_workers);
     g1h->workers()->run_task(&g1_par_scrub_rs_task);
-    g1h->set_par_threads(0);

     double rs_scrub_end = os::elapsedTime();
     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
@@ -2308,9 +2299,7 @@ void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   // and overflow handling in CMTask::do_marking_step() knows
   // how many workers to wait for.
   _cm->set_concurrency(_active_workers);
-  _g1h->set_par_threads(_active_workers);
   _workers->run_task(&proc_task_proxy);
-  _g1h->set_par_threads(0);
 }

 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
@@ -2340,9 +2329,7 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   // and overflow handling in CMTask::do_marking_step() knows
   // how many workers to wait for.
   _cm->set_concurrency(_active_workers);
-  _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
-  _g1h->set_par_threads(0);
 }

 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
@@ -2624,9 +2611,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
-    g1h->set_par_threads(active_workers);
     g1h->workers()->run_task(&remarkTask);
-    g1h->set_par_threads(0);
   }

   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -3000,9 +2985,7 @@ void ConcurrentMark::aggregate_count_data() {
   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                            _max_worker_id, n_workers);

-  _g1h->set_par_threads(n_workers);
   _g1h->workers()->run_task(&g1_par_agg_task);
-  _g1h->set_par_threads(0);
 }

 // Clear the per-worker arrays used to store the per-region counting data
@@ -1330,23 +1330,12 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
                n_workers == workers()->total_workers(),
                "If not dynamic should be using all the workers");
         workers()->set_active_workers(n_workers);
-        // Set parallel threads in the heap (_n_par_threads) only
-        // before a parallel phase and always reset it to 0 after
-        // the phase so that the number of parallel threads does
-        // no get carried forward to a serial phase where there
-        // may be code that is "possibly_parallel".
-        set_par_threads(n_workers);

         ParRebuildRSTask rebuild_rs_task(this);
         assert(UseDynamicNumberOfGCThreads ||
                workers()->active_workers() == workers()->total_workers(),
                "Unless dynamic should use total workers");
-        // Use the most recent number of active workers
-        assert(workers()->active_workers() > 0,
-               "Active workers not properly set");
-        set_par_threads(workers()->active_workers());
         workers()->run_task(&rebuild_rs_task);
-        set_par_threads(0);

         // Rebuild the strong code root lists for each region
         rebuild_strong_code_roots();
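The comment block deleted above names the hazard of the old protocol: a thread count left over from a parallel phase could leak into a later serial phase containing "possibly_parallel" code. A standalone illustration of that failure mode (invented names, not HotSpot code):

// Standalone illustration of the stale-state hazard named in the deleted
// comment; invented names, not HotSpot code.
#include <cassert>

static unsigned g_n_par_threads = 0;   // stand-in for CollectedHeap::_n_par_threads

// "Possibly parallel" code picked its path from the heap-global count:
static bool use_parallel_path() { return g_n_par_threads > 0; }

int main() {
  g_n_par_threads = 8;                 // set_par_threads(8) before the phase
  assert(use_parallel_path());         // parallel phase: correct
  // Without the reset, a following serial phase would still see 8 and
  // wrongly take the parallel path; hence the mandatory trailing call:
  g_n_par_threads = 0;                 // set_par_threads(0): 0 ==> non-parallel
  assert(!use_parallel_path());        // serial phase: correct again
  return 0;
}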
@@ -3045,10 +3034,7 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
     assert(UseDynamicNumberOfGCThreads ||
            workers()->active_workers() == workers()->total_workers(),
            "If not dynamic should be using all the workers");
-    uint n_workers = workers()->active_workers();
-    set_par_threads(n_workers);
     workers()->run_task(&task);
-    set_par_threads(0);
     if (task.failures()) {
       failures = true;
     }
@@ -4041,10 +4027,8 @@ void G1CollectedHeap::finalize_for_evac_failure() {
 void G1CollectedHeap::remove_self_forwarding_pointers() {
   double remove_self_forwards_start = os::elapsedTime();

-  set_par_threads();
   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
   workers()->run_task(&rsfp_task);
-  set_par_threads(0);

   // Now restore saved marks, if any.
   assert(_objs_with_preserved_marks.size() ==
@@ -4810,19 +4794,14 @@ void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,

   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
                                         n_workers, class_unloading_occurred);
-  set_par_threads(n_workers);
   workers()->run_task(&g1_unlink_task);
-  set_par_threads(0);
 }

 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
                                                      bool process_strings, bool process_symbols) {
   {
-    uint n_workers = workers()->active_workers();
     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
-    set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
-    set_par_threads(0);
   }

   if (G1StringDedup::is_enabled()) {
@@ -4850,13 +4829,9 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();

-  uint n_workers = workers()->active_workers();
-
   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
   dirty_card_queue_set().reset_for_par_iteration();
-  set_par_threads(n_workers);
   workers()->run_task(&redirty_task);
-  set_par_threads(0);

   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
   dcq.merge_bufferlists(&dirty_card_queue_set());
@@ -5092,9 +5067,7 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   ParallelTaskTerminator terminator(_active_workers, _queues);
   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);

-  _g1h->set_par_threads(_active_workers);
   _workers->run_task(&proc_task_proxy);
-  _g1h->set_par_threads(0);
 }

 // Gang task for parallel reference enqueueing.
@@ -5123,9 +5096,7 @@ void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {

   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);

-  _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
-  _g1h->set_par_threads(0);
 }

 // End of weak reference support closures
@@ -5247,15 +5218,12 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {

   assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");

-  set_par_threads(no_of_gc_workers);
   G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                  no_of_gc_workers,
                                                  _task_queues);

   workers()->run_task(&keep_cm_referents);

-  set_par_threads(0);
-
   // Closure to test whether a referent is alive.
   G1STWIsAliveClosure is_alive(this);

@@ -5382,8 +5350,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   assert(UseDynamicNumberOfGCThreads ||
          n_workers == workers()->total_workers(),
          "If not dynamic should be using all the workers");
-  set_par_threads(n_workers);
-

   init_for_evac_failure(NULL);

@@ -5424,8 +5390,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
       (os::elapsedTime() - end_par_time_sec) * 1000.0;
   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);

-  set_par_threads(0);
-
   // Process any discovered reference objects - we have
   // to do this _before_ we retire the GC alloc regions
   // as we may have to copy some 'reachable' referent
@@ -5778,9 +5742,7 @@ void G1CollectedHeap::cleanUpCardTable() {
   // Iterate over the dirty cards region list.
   G1ParCleanupCTTask cleanup_task(ct_bs, this);

-  set_par_threads();
   workers()->run_task(&cleanup_task);
-  set_par_threads(0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
@@ -6313,21 +6275,6 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
   g1mm()->update_eden_size();
 }

-void G1CollectedHeap::set_par_threads() {
-  // Don't change the number of workers.  Use the value previously set
-  // in the workgroup.
-  uint n_workers = workers()->active_workers();
-  assert(UseDynamicNumberOfGCThreads ||
-         n_workers == workers()->total_workers(),
-         "Otherwise should be using the total number of workers");
-  if (n_workers == 0) {
-    assert(false, "Should have been set in prior evacuation pause.");
-    n_workers = ParallelGCThreads;
-    workers()->set_active_workers(n_workers);
-  }
-  set_par_threads(n_workers);
-}
-
 // Methods for the GC alloc regions

 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
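The deleted no-argument overload also shows HotSpot's assert-then-recover pattern: debug builds stop at the assert(false, ...), while product builds, where asserts compile away, repair the state and continue. A standalone sketch of that shape (invented names):

// Standalone sketch of the assert-then-recover pattern; invented names.
// With NDEBUG undefined the assert aborts (debug build); compile with
// -DNDEBUG to see the product-build recovery path run instead.
#include <cassert>

static unsigned g_active_workers = 0;          // imagine this was left unset
static const unsigned kParallelGCThreads = 8;  // stand-in for ParallelGCThreads

static unsigned workers_or_fallback() {
  unsigned n = g_active_workers;
  if (n == 0) {
    assert(false && "Should have been set in prior evacuation pause.");
    n = kParallelGCThreads;                    // product-build recovery
    g_active_workers = n;
  }
  return n;
}

int main() {
  return workers_or_fallback() == kParallelGCThreads ? 0 : 1;
}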
@@ -1012,11 +1012,6 @@ public:
   // Initialize weak reference processing.
   void ref_processing_init();

-  // Explicitly import set_par_threads into this scope
-  using CollectedHeap::set_par_threads;
-  // Set _n_par_threads according to a policy TBD.
-  void set_par_threads();
-
   virtual Name kind() const {
     return CollectedHeap::G1CollectedHeap;
   }
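The using-declaration removed here worked around C++ name hiding: declaring the no-argument set_par_threads() in G1CollectedHeap would otherwise hide the inherited set_par_threads(uint). A standalone demonstration of the language rule (invented types, not HotSpot code):

// Standalone demonstration of C++ name hiding; invented types, not HotSpot code.
struct BaseHeap {
  virtual void set_par_threads(unsigned t) { _count = t; }
  virtual ~BaseHeap() {}
  unsigned _count = 0;
};

struct DerivedHeap : BaseHeap {
  // Without this using-declaration, the no-argument overload below hides
  // BaseHeap::set_par_threads(unsigned), and d.set_par_threads(2u) fails
  // to compile.
  using BaseHeap::set_par_threads;
  void set_par_threads() { set_par_threads(4u); }  // delegates to the base overload
};

int main() {
  DerivedHeap d;
  d.set_par_threads(2u);  // visible only because of the using-declaration
  d.set_par_threads();    // the derived no-argument policy version
  return d._count == 4 ? 0 : 1;
}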
@@ -153,9 +153,7 @@ void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,

   G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  g1h->set_par_threads();
   g1h->workers()->run_task(&task);
-  g1h->set_par_threads(0);
 }

 void G1StringDedup::threads_do(ThreadClosure* tc) {
@@ -2029,7 +2029,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   // Set the number of GC threads to be used in this collection
   gc_task_manager()->set_active_gang();
   gc_task_manager()->task_idle_workers();
-  heap->set_par_threads(gc_task_manager()->active_workers());

   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
@@ -382,7 +382,6 @@ bool PSScavenge::invoke_no_policy() {
   // Get the active number of workers here and use that value
   // throughout the methods.
   uint active_workers = gc_task_manager()->active_workers();
-  heap->set_par_threads(active_workers);

   PSPromotionManager::pre_scavenge();

@@ -627,7 +627,9 @@ void DefNewGeneration::collect(bool full,
          "save marks have not been newly set.");

   {
-    // SerialGC runs with n_workers == 0.
+    // DefNew needs to run with n_threads == 0, to make sure the serial
+    // version of the card table scanning code is used.
+    // See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
     StrongRootsScope srs(0);

     gch->gen_process_roots(&srs,
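The expanded comment records the new contract: DefNew signals serial execution by constructing StrongRootsScope with n_threads == 0, and the card-table scan dispatches on that count rather than on heap-global state. A simplified sketch of that dispatch (not the real CardTableModRefBS code):

// Not the real CardTableModRefBS code: a simplified sketch of how the
// caller-supplied thread count now selects the serial or parallel path.
#include <cstdio>

static void serial_card_scan() { std::printf("serial card scan (DefNew)\n"); }
static void parallel_card_scan(unsigned n) { std::printf("parallel card scan, %u threads\n", n); }

static void non_clean_card_iterate_possibly_parallel(unsigned n_threads) {
  if (n_threads > 0) {
    parallel_card_scan(n_threads);   // ParNew and friends pass n_workers > 0
  } else {
    serial_card_scan();              // DefNew passes 0 via StrongRootsScope srs(0)
  }
}

int main() {
  non_clean_card_iterate_possibly_parallel(0);   // serial
  non_clean_card_iterate_possibly_parallel(4);   // parallel
}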
@@ -452,7 +452,7 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
   } else {
     // clear_cl finds contiguous dirty ranges of cards to process and clear.

-    // This is the single-threaded version.
+    // This is the single-threaded version used by DefNew.
     const bool parallel = false;

     DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
@@ -290,9 +290,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   }
   GCCause::Cause gc_cause() { return _gc_cause; }

-  // May be overridden to set additional parallelism.
-  virtual void set_par_threads(uint t) { (void)t; };
-
   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
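The deleted default implementation is also a small idiom worth noting: casting the parameter to void is the conventional way to silence unused-parameter warnings in a deliberately empty virtual hook. In isolation:

// The unused-parameter idiom from the deleted declaration, in isolation.
struct HeapHook {                                  // invented name
  // A deliberately empty default that subclasses may override; the
  // (void)t cast keeps -Wunused-parameter quiet.
  virtual void set_par_threads(unsigned t) { (void)t; }
  virtual ~HeapHook() {}
};

int main() {
  HeapHook h;
  h.set_par_threads(8);  // a no-op at the base level
}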
@@ -561,11 +561,6 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
 }

-void GenCollectedHeap::set_par_threads(uint t) {
-  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
-  CollectedHeap::set_par_threads(t);
-}
-
 #ifdef ASSERT
 class AssertNonScavengableClosure: public OopClosure {
 public:
@@ -364,8 +364,6 @@ public:
   // asserted to be this type.
   static GenCollectedHeap* heap();

-  void set_par_threads(uint t);
-
   // Invoke the "do_oop" method of one of the closures "not_older_gens"
   // or "older_gens" on root locations for the generation at
   // "level".  (The "older_gens" closure is used for scanning references