6984287: Regularize how GC parallel workers are specified

Associate the number of GC workers with the workgang as opposed to the task.
Reviewed-by: johnc, ysr

commit 28e56b8970 (parent 3af63c10ab)
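The change applies one recurring pattern: scattered "ParallelGCThreads > 0"
tests are funneled through a single heap-level predicate,
CollectedHeap::use_parallel_gc_threads() (G1 call sites spell it via
G1CollectedHeap, which inherits it). A minimal model of the predicate, as a
sketch rather than VM source:

    // Sketch only, not HotSpot source. ParallelGCThreads stands in for the
    // -XX:ParallelGCThreads flag; the real method lives on CollectedHeap.
    #include <cstddef>

    static size_t ParallelGCThreads = 4;

    struct CollectedHeap {
      // One authoritative answer to "are parallel GC workers in use?"
      static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
    };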
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");

-  if (UseParNewGC && ParallelGCThreads > 0) {
+  if (ParNewGeneration::in_use()) {
     if (UseAdaptiveSizePolicy) {
       _generations[0] = new GenerationSpec(Generation::ASParNew,
                                            _initial_gen0_size, _max_gen0_size);
@@ -79,7 +79,7 @@ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,

 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
   // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC && ParallelGCThreads > 0) {
+  if (ParNewGeneration::in_use()) {
     _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
   }
   else {
@@ -102,7 +102,7 @@ void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {

   assert(size_policy() != NULL, "A size policy is required");
   // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC && ParallelGCThreads > 0) {
+  if (ParNewGeneration::in_use()) {
     _gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
                                                           size_policy());
   }
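The three ParNewGeneration::in_use() call sites above lean on a helper added
at the bottom of this diff (the parNewGeneration.cpp/.hpp hunks); its whole
body is the old flag test, now stated once:

    // As defined in the parNewGeneration.cpp hunk later in this changeset:
    bool ParNewGeneration::in_use() {
      return UseParNewGC && ParallelGCThreads > 0;
    }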
@@ -124,7 +124,8 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   checkFreeListConsistency();

   // Initialize locks for parallel case.
-  if (ParallelGCThreads > 0) {
+
+  if (CollectedHeap::use_parallel_gc_threads()) {
     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                               "a freelist par lock",
@@ -1071,7 +1072,8 @@ bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
   // at address below "p" in finding the object that contains "p"
   // and those objects (if garbage) may have been modified to hold
   // live range information.
-  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
+  // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
+  //        "Should be a block boundary");
   if (FreeChunk::indicatesFreeChunk(p)) return false;
   klassOop k = oop(p)->klass_or_null();
   if (k != NULL) {
@@ -2932,7 +2934,9 @@ initialize_sequential_subtasks_for_rescan(int n_threads) {
          "n_tasks calculation incorrect");
   SequentialSubTasksDone* pst = conc_par_seq_tasks();
   assert(!pst->valid(), "Clobbering existing data?");
-  pst->set_par_threads(n_threads);
+  // Sets the condition for completion of the subtask (how many threads
+  // need to finish in order to be done).
+  pst->set_n_threads(n_threads);
   pst->set_n_tasks((int)n_tasks);
 }

@@ -2972,6 +2976,8 @@ initialize_sequential_subtasks_for_marking(int n_threads,
          "n_tasks calculation incorrect");
   SequentialSubTasksDone* pst = conc_par_seq_tasks();
   assert(!pst->valid(), "Clobbering existing data?");
-  pst->set_par_threads(n_threads);
+  // Sets the condition for completion of the subtask (how many threads
+  // need to finish in order to be done).
+  pst->set_n_threads(n_threads);
   pst->set_n_tasks((int)n_tasks);
 }
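The set_par_threads() -> set_n_threads() rename recurs throughout the
changeset. Per the comments added above, the value is a completion condition
(how many threads must finish), not a parallelism request. A simplified,
non-atomic model of SequentialSubTasksDone -- the two setters match the diff,
everything else is illustrative:

    // Sketch; the real class claims tasks and counts finishers atomically.
    struct SequentialSubTasksDone {
      int _n_threads;    // how many threads must check in before "done"
      int _n_tasks;      // how many claimable sub-tasks exist
      int _n_claimed;
      int _n_completed;

      void set_n_threads(int t) { _n_threads = t; _n_completed = 0; }
      void set_n_tasks(int t)   { _n_tasks   = t; _n_claimed   = 0; }

      // Workers claim sub-tasks until none remain (hypothetical helper)...
      bool try_claim_task(int& t) { t = _n_claimed++; return t < _n_tasks; }
      // ...then check in; only the last of the _n_threads workers sees true.
      bool all_tasks_completed()  { return ++_n_completed == _n_threads; }
    };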
@@ -195,7 +195,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
            "Offset of FreeChunk::_prev within FreeChunk must match"
            " that of OopDesc::_klass within OopDesc");
   )
-  if (ParallelGCThreads > 0) {
+  if (CollectedHeap::use_parallel_gc_threads()) {
     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
     _par_gc_thread_states =
       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
@@ -616,7 +616,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   }

   // Support for multi-threaded concurrent phases
-  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
+  if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
@@ -628,6 +628,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
       warning("GC/CMS: _conc_workers allocation failure: "
               "forcing -CMSConcurrentMTEnabled");
       CMSConcurrentMTEnabled = false;
+    } else {
+      _conc_workers->initialize_workers();
     }
   } else {
     CMSConcurrentMTEnabled = false;
@@ -936,7 +938,7 @@ void ConcurrentMarkSweepGeneration::reset_after_compaction() {
   // along with all the other pointers into the heap but
   // compaction is expected to be a rare event with
   // a heap using cms so don't do it without seeing the need.
-  if (ParallelGCThreads > 0) {
+  if (CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 0; i < ParallelGCThreads; i++) {
       _par_gc_thread_states[i]->promo.reset();
     }
@@ -2630,7 +2632,8 @@ void CMSCollector::gc_prologue(bool full) {
   // Should call gc_prologue_work() for all cms gens we are responsible for
   bool registerClosure = _collectorState >= Marking
                          && _collectorState < Sweeping;
-  ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
+  ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
+                                                 &_modUnionClosurePar
                                                  : &_modUnionClosure;
   _cmsGen->gc_prologue_work(full, registerClosure, muc);
   _permGen->gc_prologue_work(full, registerClosure, muc);
@@ -2731,7 +2734,7 @@ void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
   collector()->gc_epilogue(full);

   // Also reset promotion tracking in par gc thread states.
-  if (ParallelGCThreads > 0) {
+  if (CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 0; i < ParallelGCThreads; i++) {
       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
     }
@@ -3731,7 +3734,6 @@ class CMSConcMarkingTerminator: public ParallelTaskTerminator {
 // MT Concurrent Marking Task
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
-  YieldingFlexibleWorkGang* _workers;      // the whole gang
   int           _n_workers;                // requested/desired # workers
   bool          _asynch;
   bool          _result;
@@ -3751,21 +3753,19 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSConcMarkingTask(CMSCollector* collector,
                  CompactibleFreeListSpace* cms_space,
                  CompactibleFreeListSpace* perm_space,
-                 bool asynch, int n_workers,
+                 bool asynch,
                  YieldingFlexibleWorkGang* workers,
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
     _perm_space(perm_space),
-    _asynch(asynch), _n_workers(n_workers), _result(true),
-    _workers(workers), _task_queues(task_queues),
-    _term(n_workers, task_queues, _collector, asynch),
+    _asynch(asynch), _n_workers(0), _result(true),
+    _task_queues(task_queues),
+    _term(_n_workers, task_queues, _collector, asynch),
     _bit_map_lock(collector->bitMapLock())
   {
-    assert(n_workers <= workers->total_workers(),
-           "Else termination won't work correctly today"); // XXX FIX ME!
-    _requested_size = n_workers;
+    _requested_size = _n_workers;
     _term.set_task(this);
     assert(_cms_space->bottom() < _perm_space->bottom(),
            "Finger incorrectly initialized below");
@@ -3781,6 +3781,10 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {

   CMSConcMarkingTerminator* terminator() { return &_term; }

+  virtual void set_for_termination(int active_workers) {
+    terminator()->reset_for_reuse(active_workers);
+  }
+
   void work(int i);

   virtual void coordinator_yield();  // stuff done by coordinator
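This hunk is the heart of the change for this task: the constructor no longer
takes n_workers; instead the new set_for_termination() hook lets the gang
announce the worker count once it is actually known. A sketch of the resulting
flow -- active_workers() is a hypothetical accessor, not shown in this diff:

    // Sketch; YieldingFlexibleWorkGang's real entry points may differ.
    void run_marking_sketch(YieldingFlexibleWorkGang* gang,
                            CMSConcMarkingTask* task) {
      int active = gang->active_workers();  // gang owns the count now
      task->set_for_termination(active);    // size the terminator to match
      gang->run_task(task);                 // workers then enter task->work(i)
    }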
@@ -4220,9 +4224,12 @@ bool CMSCollector::do_marking_mt(bool asynch) {
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();

-  CMSConcMarkingTask tsk(this, cms_space, perm_space,
-                         asynch, num_workers /* number requested XXX */,
-                         conc_workers(), task_queues());
+  CMSConcMarkingTask tsk(this,
+                         cms_space,
+                         perm_space,
+                         asynch,
+                         conc_workers(),
+                         task_queues());

   // Since the actual number of workers we get may be different
   // from the number we requested above, do we need to do anything different
@@ -4326,6 +4333,10 @@ void CMSCollector::preclean() {
   verify_overflow_empty();
   _abort_preclean = false;
   if (CMSPrecleaningEnabled) {
+    // Precleaning is currently not MT but the reference processor
+    // may be set for MT.  Disable it temporarily here.
+    ReferenceProcessor* rp = ref_processor();
+    ReferenceProcessorMTProcMutator z(rp, false);
     _eden_chunk_index = 0;
     size_t used = get_eden_used();
     size_t capacity = get_eden_capacity();
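ReferenceProcessorMTProcMutator acts as a scope guard here: MT reference
processing is switched off for the duration of precleaning and restored on
scope exit. Its body is not part of this diff; a plausible shape, assuming a
set_mt_processing() setter to pair with the processing_is_mt() query used
elsewhere in the patch:

    // Assumed RAII shape, not verbatim HotSpot source.
    class ReferenceProcessorMTProcMutator {
      ReferenceProcessor* _rp;
      bool                _saved;
     public:
      ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, bool mt)
          : _rp(rp), _saved(rp->processing_is_mt()) {
        _rp->set_mt_processing(mt);   // assumed setter
      }
      ~ReferenceProcessorMTProcMutator() { _rp->set_mt_processing(_saved); }
    };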
@@ -4918,7 +4929,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   // dirtied since the first checkpoint in this GC cycle and prior to
   // the most recent young generation GC, minus those cleaned up by the
   // concurrent precleaning.
-  if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
+  if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
     TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
     do_remark_parallel();
   } else {
@@ -5012,7 +5023,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
 // Parallel remark task
 class CMSParRemarkTask: public AbstractGangTask {
   CMSCollector* _collector;
-  WorkGang*     _workers;
   int           _n_workers;
   CompactibleFreeListSpace* _cms_space;
   CompactibleFreeListSpace* _perm_space;
@@ -5025,21 +5035,21 @@ class CMSParRemarkTask: public AbstractGangTask {
   CMSParRemarkTask(CMSCollector* collector,
                    CompactibleFreeListSpace* cms_space,
                    CompactibleFreeListSpace* perm_space,
-                   int n_workers, WorkGang* workers,
+                   int n_workers, FlexibleWorkGang* workers,
                    OopTaskQueueSet* task_queues):
     AbstractGangTask("Rescan roots and grey objects in parallel"),
     _collector(collector),
     _cms_space(cms_space), _perm_space(perm_space),
     _n_workers(n_workers),
-    _workers(workers),
     _task_queues(task_queues),
-    _term(workers->total_workers(), task_queues) { }
+    _term(n_workers, task_queues) { }

   OopTaskQueueSet* task_queues() { return _task_queues; }

   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

   ParallelTaskTerminator* terminator() { return &_term; }
+  int n_workers() { return _n_workers; }

   void work(int i);

@@ -5057,6 +5067,11 @@ class CMSParRemarkTask: public AbstractGangTask {
   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
 };

+// work_queue(i) is passed to the closure
+// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
+// also is passed to do_dirty_card_rescan_tasks() and to
+// do_work_steal() to select the i-th task_queue.
+
 void CMSParRemarkTask::work(int i) {
   elapsedTimer _timer;
   ResourceMark rm;
@@ -5128,6 +5143,7 @@ void CMSParRemarkTask::work(int i) {

   // Do the rescan tasks for each of the two spaces
   // (cms_space and perm_space) in turn.
+  // "i" is passed to select the "i-th" task_queue
   do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
   do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
   _timer.stop();
@@ -5150,6 +5166,7 @@ void CMSParRemarkTask::work(int i) {
   }
 }

+// Note that parameter "i" is not used.
 void
 CMSParRemarkTask::do_young_space_rescan(int i,
   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
@@ -5309,8 +5326,13 @@ CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
   size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                        (size_t)ParGCDesiredObjsFromOverflowList);
   // Now check if there's any work in the overflow list
+  // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
+  // only affects the number of attempts made to get work from the
+  // overflow list and does not affect the number of workers.  Just
+  // pass ParallelGCThreads so this behavior is unchanged.
   if (_collector->par_take_from_overflow_list(num_from_overflow_list,
-                                              work_q)) {
+                                              work_q,
+                                              ParallelGCThreads)) {
     // found something in global overflow list;
     // not yet ready to go stealing work from others.
     // We'd like to assert(work_q->size() != 0, ...)
@@ -5367,11 +5389,12 @@ void CMSCollector::reset_survivor_plab_arrays() {
 // Merge the per-thread plab arrays into the global survivor chunk
 // array which will provide the partitioning of the survivor space
 // for CMS rescan.
-void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
+void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
+                                              int no_of_gc_threads) {
   assert(_survivor_plab_array  != NULL, "Error");
   assert(_survivor_chunk_array != NULL, "Error");
   assert(_collectorState == FinalMarking, "Error");
-  for (uint j = 0; j < ParallelGCThreads; j++) {
+  for (int j = 0; j < no_of_gc_threads; j++) {
     _cursor[j] = 0;
   }
   HeapWord* top = surv->top();
@@ -5379,7 +5402,7 @@ void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
     HeapWord* min_val = top;          // Higher than any PLAB address
     uint      min_tid = 0;            // position of min_val this round
-    for (uint j = 0; j < ParallelGCThreads; j++) {
+    for (int j = 0; j < no_of_gc_threads; j++) {
       ChunkArray* cur_sca = &_survivor_plab_array[j];
       if (_cursor[j] == cur_sca->end()) {
         continue;
@@ -5413,7 +5436,7 @@ void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
   // Verify that we used up all the recorded entries
   #ifdef ASSERT
   size_t total = 0;
-  for (uint j = 0; j < ParallelGCThreads; j++) {
+  for (int j = 0; j < no_of_gc_threads; j++) {
     assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
     total += _cursor[j];
   }
@@ -5448,13 +5471,15 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
     // Each valid entry in [0, _eden_chunk_index) represents a task.
     size_t n_tasks = _eden_chunk_index + 1;
     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
-    pst->set_par_threads(n_threads);
+    // Sets the condition for completion of the subtask (how many threads
+    // need to finish in order to be done).
+    pst->set_n_threads(n_threads);
     pst->set_n_tasks((int)n_tasks);
   }

   // Merge the survivor plab arrays into _survivor_chunk_array
   if (_survivor_plab_array != NULL) {
-    merge_survivor_plab_arrays(dng->from());
+    merge_survivor_plab_arrays(dng->from(), n_threads);
   } else {
     assert(_survivor_chunk_index == 0, "Error");
   }
@@ -5463,7 +5488,9 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
   {
     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
-    pst->set_par_threads(n_threads);
+    // Sets the condition for completion of the subtask (how many threads
+    // need to finish in order to be done).
+    pst->set_n_threads(n_threads);
     pst->set_n_tasks(1);
     assert(pst->valid(), "Error");
   }
@@ -5474,7 +5501,9 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
     assert(!pst->valid(), "Clobbering existing data?");
     size_t n_tasks = _survivor_chunk_index + 1;
     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
-    pst->set_par_threads(n_threads);
+    // Sets the condition for completion of the subtask (how many threads
+    // need to finish in order to be done).
+    pst->set_n_threads(n_threads);
     pst->set_n_tasks((int)n_tasks);
     assert(pst->valid(), "Error");
   }
@@ -5483,7 +5512,7 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
 // Parallel version of remark
 void CMSCollector::do_remark_parallel() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   int n_workers = workers->total_workers();
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
@@ -5636,13 +5665,11 @@ void CMSCollector::do_remark_non_parallel() {
 ////////////////////////////////////////////////////////
 // Parallel Reference Processing Task Proxy Class
 ////////////////////////////////////////////////////////
-class CMSRefProcTaskProxy: public AbstractGangTask {
+class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   CMSCollector*          _collector;
   CMSBitMap*             _mark_bit_map;
   const MemRegion        _span;
-  OopTaskQueueSet*       _task_queues;
-  ParallelTaskTerminator _term;
   ProcessTask&           _task;

 public:
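The proxy no longer carries its own queue set and terminator; both move into
the new AbstractGangTaskWOopQueues base class. The base itself is not shown in
this diff, but its shape can be inferred from the call sites (the two-argument
constructor, queues(), and the set_for_termination() protocol) -- a
reconstructed sketch, not the verbatim class:

    class AbstractGangTaskWOopQueues : public AbstractGangTask {
      OopTaskQueueSet*       _queues;
      ParallelTaskTerminator _terminator;
     public:
      AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues)
        : AbstractGangTask(name), _queues(queues), _terminator(0, queues) {}
      OopTaskQueueSet*        queues()     { return _queues; }
      ParallelTaskTerminator* terminator() { return &_terminator; }
      // The gang supplies the actual worker count before work() runs.
      virtual void set_for_termination(int active_workers) {
        terminator()->reset_for_reuse(active_workers);
      }
    };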
@@ -5650,24 +5677,21 @@ public:
                      CMSCollector* collector,
                      const MemRegion& span,
                      CMSBitMap* mark_bit_map,
-                     int total_workers,
+                     AbstractWorkGang* workers,
                      OopTaskQueueSet* task_queues):
-    AbstractGangTask("Process referents by policy in parallel"),
+    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
+      task_queues),
     _task(task),
-    _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
-    _task_queues(task_queues),
-    _term(total_workers, task_queues)
+    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
   {
     assert(_collector->_span.equals(_span) && !_span.is_empty(),
            "Inconsistency in _span");
   }

-  OopTaskQueueSet* task_queues() { return _task_queues; }
+  OopTaskQueueSet* task_queues() { return queues(); }

   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

-  ParallelTaskTerminator* terminator() { return &_term; }
-
   void do_work_steal(int i,
                      CMSParDrainMarkingStackClosure* drain,
                      CMSParKeepAliveClosure* keep_alive,
@@ -5739,8 +5763,13 @@ void CMSRefProcTaskProxy::do_work_steal(int i,
   size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                        (size_t)ParGCDesiredObjsFromOverflowList);
   // Now check if there's any work in the overflow list
+  // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
+  // only affects the number of attempts made to get work from the
+  // overflow list and does not affect the number of workers.  Just
+  // pass ParallelGCThreads so this behavior is unchanged.
   if (_collector->par_take_from_overflow_list(num_from_overflow_list,
-                                              work_q)) {
+                                              work_q,
+                                              ParallelGCThreads)) {
     // Found something in global overflow list;
     // not yet ready to go stealing work from others.
     // We'd like to assert(work_q->size() != 0, ...)
@@ -5773,13 +5802,12 @@ void CMSRefProcTaskProxy::do_work_steal(int i,
 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
 {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
-  int n_workers = workers->total_workers();
   CMSRefProcTaskProxy rp_task(task, &_collector,
                               _collector.ref_processor()->span(),
                               _collector.markBitMap(),
-                              n_workers, _collector.task_queues());
+                              workers, _collector.task_queues());
   workers->run_task(&rp_task);
 }

@@ -5787,7 +5815,7 @@ void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 {

   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
@@ -5814,6 +5842,14 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   {
     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
     if (rp->processing_is_mt()) {
+      // Set the degree of MT here.  If the discovery is done MT, there
+      // may have been a different number of threads doing the discovery
+      // and a different number of discovered lists may have Ref objects.
+      // That is OK as long as the Reference lists are balanced (see
+      // balance_all_queues() and balance_queues()).
+
+
+      rp->set_mt_degree(ParallelGCThreads);
       CMSRefProcTaskExecutor task_executor(*this);
       rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
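Note the division of labor the added comment describes: set_mt_degree() pins
the number of discovered-reference lists used during processing to
ParallelGCThreads even if discovery ran with a different thread count, and the
balance_all_queues() call added in the next hunk is what makes that safe. The
protocol, condensed from this hunk and the next:

    // Condensed from the two refProcessingWork() hunks; not new API.
    if (rp->processing_is_mt()) {
      rp->set_mt_degree(ParallelGCThreads);  // lists used for processing
      // ... rp->process_discovered_references(...) via the MT task executor
      rp->balance_all_queues();              // even out Refs across the lists
      // ... rp->enqueue_discovered_references(&task_executor)
    }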
@@ -5874,6 +5910,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {

   rp->set_enqueuing_is_done(true);
   if (rp->processing_is_mt()) {
+    rp->balance_all_queues();
     CMSRefProcTaskExecutor task_executor(*this);
     rp->enqueue_discovered_references(&task_executor);
   } else {
@@ -8708,7 +8745,8 @@ bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
 // similar changes might be needed.
 // CR 6797058 has been filed to consolidate the common code.
 bool CMSCollector::par_take_from_overflow_list(size_t num,
-                                               OopTaskQueue* work_q) {
+                                               OopTaskQueue* work_q,
+                                               int no_of_gc_threads) {
   assert(work_q->size() == 0, "First empty local work queue");
   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
   if (_overflow_list == NULL) {
@@ -8717,7 +8755,9 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
   // Grab the entire list; we'll put back a suffix
   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
   Thread* tid = Thread::current();
-  size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
+  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
+  // set to ParallelGCThreads.
+  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
   size_t sleep_time_millis = MAX2((size_t)1, num/100);
   // If the list is busy, we spin for a short while,
   // sleeping between attempts to get the list.
@@ -9256,4 +9296,3 @@ TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStat
                                      true /* recordGCEndTime */,
                                      true /* countCollection */ );
 }
-
@@ -729,7 +729,9 @@ class CMSCollector: public CHeapObj {

   // Support for marking stack overflow handling
   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
-  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
+  bool par_take_from_overflow_list(size_t num,
+                                   OopTaskQueue* to_work_q,
+                                   int no_of_gc_threads);
   void push_on_overflow_list(oop p);
   void par_push_on_overflow_list(oop p);
   // the following is, obviously, not, in general, "MT-stable"
@@ -768,7 +770,7 @@ class CMSCollector: public CHeapObj {
   void abortable_preclean(); // Preclean while looking for possible abort
   void initialize_sequential_subtasks_for_young_gen_rescan(int i);
   // Helper function for above; merge-sorts the per-thread plab samples
-  void merge_survivor_plab_arrays(ContiguousSpace* surv);
+  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
   // Resets (i.e. clears) the per-thread plab sample vectors
   void reset_survivor_plab_arrays();

@@ -583,10 +583,13 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
 #endif

   guarantee(parallel_marking_threads() > 0, "peace of mind");
-  _parallel_workers = new WorkGang("G1 Parallel Marking Threads",
-                                   (int) parallel_marking_threads(), false, true);
-  if (_parallel_workers == NULL)
+  _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
+                                           (int) _parallel_marking_threads, false, true);
+  if (_parallel_workers == NULL) {
     vm_exit_during_initialization("Failed necessary allocation.");
+  } else {
+    _parallel_workers->initialize_workers();
+  }
 }

 // so that the call below can read a sensible value
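G1 here adopts the same two-phase idiom CMS uses for _conc_workers earlier in
the diff: allocate the FlexibleWorkGang, then call initialize_workers()
separately, so a failed allocation is caught before any threads exist. What
"flexible" adds over a plain WorkGang is an active-worker count that can be
set per task; a rough model, with internals simplified well beyond the real
class:

    // Sketch only; HotSpot's FlexibleWorkGang carries more machinery.
    class FlexibleWorkGang /* : public WorkGang */ {
      int _total_workers;    // capacity, fixed at construction
      int _active_workers;   // how many should run the current task
     public:
      explicit FlexibleWorkGang(int total)
        : _total_workers(total), _active_workers(total) {}
      void initialize_workers() { /* create threads here, not in the
                                     constructor, so allocation failure can
                                     be reported first */ }
      int  total_workers()  const { return _total_workers; }
      int  active_workers() const { return _active_workers; }
      void set_active_workers(int n) { _active_workers = n; }
    };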
@@ -1451,7 +1454,7 @@ public:
                                                _bm, _g1h->concurrent_mark(),
                                                _region_bm, _card_bm);
     calccl.no_yield();
-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&calccl, i,
                                             HeapRegion::FinalCountClaimValue);
     } else {
@@ -1531,7 +1534,7 @@ public:
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h,
                                            &_par_cleanup_thread_state[i]->list,
                                            i);
-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
                                             HeapRegion::NoteEndClaimValue);
     } else {
@@ -1575,7 +1578,7 @@ public:
   {}

   void work(int i) {
-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1rs->scrub_par(_region_bm, _card_bm, i,
                        HeapRegion::ScrubRemSetClaimValue);
     } else {
@@ -1647,7 +1650,7 @@ void ConcurrentMark::cleanup() {
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
                                         &_region_bm, &_card_bm);
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     assert(g1h->check_heap_region_claim_values(
                                                HeapRegion::InitialClaimValue),
            "sanity check");
@@ -1695,7 +1698,7 @@ void ConcurrentMark::cleanup() {
   // Note end of marking in all heap regions.
   double note_end_start = os::elapsedTime();
   G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state);
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     int n_workers = g1h->workers()->total_workers();
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&g1_par_note_end_task);
@@ -1720,7 +1723,7 @@ void ConcurrentMark::cleanup() {
   if (G1ScrubRemSets) {
     double rs_scrub_start = os::elapsedTime();
     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
      int n_workers = g1h->workers()->total_workers();
      g1h->set_par_threads(n_workers);
      g1h->workers()->run_task(&g1_par_scrub_rs_task);
@@ -1934,7 +1937,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {

   g1h->ensure_parsability(false);

-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = ParallelGCThreads;
@@ -3369,14 +3372,14 @@ void CMTask::drain_satb_buffers() {

   CMObjectClosure oc(this);
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  if (ParallelGCThreads > 0)
+  if (G1CollectedHeap::use_parallel_gc_threads())
     satb_mq_set.set_par_closure(_task_id, &oc);
   else
     satb_mq_set.set_closure(&oc);

   // This keeps claiming and applying the closure to completed buffers
   // until we run out of buffers or we need to abort.
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     while (!has_aborted() &&
            satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
       if (_cm->verbose_medium())
@@ -3396,7 +3399,7 @@ void CMTask::drain_satb_buffers() {

   if (!concurrent() && !has_aborted()) {
     // We should only do this during remark.
-    if (ParallelGCThreads > 0)
+    if (G1CollectedHeap::use_parallel_gc_threads())
       satb_mq_set.par_iterate_closure_all_threads(_task_id);
     else
       satb_mq_set.iterate_closure_all_threads();
@@ -3408,7 +3411,7 @@ void CMTask::drain_satb_buffers() {
          concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");

-  if (ParallelGCThreads > 0)
+  if (G1CollectedHeap::use_parallel_gc_threads())
     satb_mq_set.set_par_closure(_task_id, NULL);
   else
     satb_mq_set.set_closure(NULL);
@@ -961,7 +961,8 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
     }

     // Rebuild remembered sets of all regions.
-    if (ParallelGCThreads > 0) {
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       ParRebuildRSTask rebuild_rs_task(this);
       assert(check_heap_region_claim_values(
              HeapRegion::InitialClaimValue), "sanity check");
@@ -1960,7 +1961,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  int worker,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
+  const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
   // try to spread out the starting points of the workers
   const size_t start_index = regions / worker_num * (size_t) worker;

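A quick worked example of the chunk-start arithmetic above: with 256 regions
and 4 workers, regions / worker_num * worker places the workers at regions 0,
64, 128 and 192, so their claim scans start spread across the heap instead of
contending at region 0 (numbers illustrative, not from the diff):

    const size_t regions    = 256;
    const size_t worker_num = 4;
    for (size_t worker = 0; worker < worker_num; worker++) {
      size_t start_index = regions / worker_num * worker;  // 0, 64, 128, 192
      // each worker iterates from start_index, wrapping around, and claims
      // regions using the claim_value protocol
    }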
@@ -2527,7 +2528,7 @@ void G1CollectedHeap::print_on_extended(outputStream* st) const {
 }

 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->print_worker_threads_on(st);
   }

@@ -2543,7 +2544,7 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
 }

 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->threads_do(tc);
   }
   tc->do_thread(_cmThread);
@@ -3083,7 +3084,7 @@ void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   if (r != NULL) {
     r_used = r->used();

-    if (ParallelGCThreads > 0) {
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       // need to take the lock to guard against two threads calling
       // get_gc_alloc_region concurrently (very unlikely but...)
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
@@ -4182,6 +4183,8 @@ public:

 // *** Common G1 Evacuation Stuff

+// This method is run in a GC worker.
+
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
@@ -4259,7 +4262,7 @@ public:
 };

 void G1CollectedHeap::save_marks() {
-  if (ParallelGCThreads == 0) {
+  if (!CollectedHeap::use_parallel_gc_threads()) {
     SaveMarksClosure sm;
     heap_region_iterate(&sm);
   }
@@ -4284,7 +4287,7 @@ void G1CollectedHeap::evacuate_collection_set() {

   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     // The individual threads will set their evac-failure closures.
     StrongRootsScope srs(this);
     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
@@ -656,6 +656,9 @@ protected:
   bool _unclean_regions_coming;

 public:
+
+  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
+
   void set_refine_cte_cl_concurrency(bool concurrent);

   RefToScanQueue *task_queue(int i) const;
@@ -684,7 +687,7 @@ public:

   void set_par_threads(int t) {
     SharedHeap::set_par_threads(t);
-    _process_strong_tasks->set_par_threads(t);
+    _process_strong_tasks->set_n_threads(t);
   }

   virtual CollectedHeap::Name kind() const {
@ -72,7 +72,10 @@ static double non_young_other_cost_per_region_ms_defaults[] = {
|
|||||||
// </NEW PREDICTION>
|
// </NEW PREDICTION>
|
||||||
|
|
||||||
G1CollectorPolicy::G1CollectorPolicy() :
|
G1CollectorPolicy::G1CollectorPolicy() :
|
||||||
_parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
|
_parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
|
||||||
|
? ParallelGCThreads : 1),
|
||||||
|
|
||||||
|
|
||||||
_n_pauses(0),
|
_n_pauses(0),
|
||||||
_recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
|
_recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
|
||||||
_recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
|
_recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
|
||||||
@ -1073,7 +1076,7 @@ void G1CollectorPolicy::print_stats (int level,
|
|||||||
}
|
}
|
||||||
|
|
||||||
double G1CollectorPolicy::avg_value (double* data) {
|
double G1CollectorPolicy::avg_value (double* data) {
|
||||||
if (ParallelGCThreads > 0) {
|
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||||
double ret = 0.0;
|
double ret = 0.0;
|
||||||
for (uint i = 0; i < ParallelGCThreads; ++i)
|
for (uint i = 0; i < ParallelGCThreads; ++i)
|
||||||
ret += data[i];
|
ret += data[i];
|
||||||
@ -1084,7 +1087,7 @@ double G1CollectorPolicy::avg_value (double* data) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
double G1CollectorPolicy::max_value (double* data) {
|
double G1CollectorPolicy::max_value (double* data) {
|
||||||
if (ParallelGCThreads > 0) {
|
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||||
double ret = data[0];
|
double ret = data[0];
|
||||||
for (uint i = 1; i < ParallelGCThreads; ++i)
|
for (uint i = 1; i < ParallelGCThreads; ++i)
|
||||||
if (data[i] > ret)
|
if (data[i] > ret)
|
||||||
@ -1096,7 +1099,7 @@ double G1CollectorPolicy::max_value (double* data) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
double G1CollectorPolicy::sum_of_values (double* data) {
|
double G1CollectorPolicy::sum_of_values (double* data) {
|
||||||
if (ParallelGCThreads > 0) {
|
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||||
double sum = 0.0;
|
   double sum = 0.0;
   for (uint i = 0; i < ParallelGCThreads; i++)
     sum += data[i];
@@ -1110,7 +1113,7 @@ double G1CollectorPolicy::max_sum (double* data1,
                                    double* data2) {
   double ret = data1[0] + data2[0];
 
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 1; i < ParallelGCThreads; ++i) {
       double data = data1[i] + data2[i];
       if (data > ret)
@@ -1126,7 +1129,7 @@ double G1CollectorPolicy::max_sum (double* data1,
 void G1CollectorPolicy::record_collection_pause_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
   size_t rs_size =
     _cur_collection_pause_used_regions_at_start - collection_set_size();
@@ -1941,7 +1944,7 @@ G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
   // Further, we're now always doing parallel collection.  But I'm still
   // leaving this here as a placeholder for a more precise assertion later.
   // (DLD, 10/05.)
-  assert((true || ParallelGCThreads > 0) ||
+  assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
          _g1->evacuation_failed() ||
          recent_survival_rate <= 1.0, "Or bad frac");
   return recent_survival_rate;
@@ -1961,7 +1964,7 @@ G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
   // Further, we're now always doing parallel collection.  But I'm still
   // leaving this here as a placeholder for a more precise assertion later.
   // (DLD, 10/05.)
-  assert((true || ParallelGCThreads > 0) ||
+  assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
          last_survival_rate <= 1.0, "Or bad frac");
   return last_survival_rate;
 } else {
@@ -2121,7 +2124,7 @@ void G1CollectorPolicy::check_other_times(int level,
 }
 
 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   MainBodySummary* body_summary = summary->main_body_summary();
   if (summary->get_total_seq()->num() > 0) {
     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
@@ -2559,7 +2562,7 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
                            (clear_marked_end - start)*1000.0);
   }
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
     const size_t WorkUnit =
@@ -523,7 +523,7 @@ prepare_for_oops_into_collection_set_do() {
   assert(!_traversal_in_progress, "Invariant between iterations.");
   set_traversal(true);
   if (ParallelGCThreads > 0) {
-    _seq_task->set_par_threads((int)n_workers());
+    _seq_task->set_n_threads((int)n_workers());
   }
   guarantee( _cards_scanned == NULL, "invariant" );
   _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
 
   int n_strides = n_threads * StridesPerThread;
   SequentialSubTasksDone* pst = sp->par_seq_tasks();
-  pst->set_par_threads(n_threads);
+  pst->set_n_threads(n_threads);
   pst->set_n_tasks(n_strides);
 
   int stride = 0;
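The rename from set_par_threads() to set_n_threads() shows up at call sites like the one above, where the thread count and the task (stride) count are set side by side. A minimal standalone sketch of the claiming pattern this supports; SeqSubTasks is a simplified mock and claim_task() is a hypothetical name, not the real HotSpot interface:

    #include <atomic>
    #include <cstdio>

    // Illustrative stand-in for SequentialSubTasksDone: workers
    // atomically claim stride indices [0, n_tasks) until all are taken.
    class SeqSubTasks {
      std::atomic<int> _n_claimed{0};
      int _n_tasks   = 0;
      int _n_threads = 0;
    public:
      void set_n_threads(int t) { _n_threads = t; }  // was set_par_threads()
      void set_n_tasks(int n)   { _n_tasks = n; }
      bool claim_task(int& t) {                      // true while strides remain
        t = _n_claimed.fetch_add(1);
        return t < _n_tasks;
      }
    };

    int main() {
      SeqSubTasks pst;
      pst.set_n_threads(4);       // participating workers
      pst.set_n_tasks(4 * 2);     // n_threads * StridesPerThread, as above
      int stride;
      while (pst.claim_task(stride)) std::printf("claimed stride %d\n", stride);
    }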
@@ -1533,3 +1533,7 @@ void ParNewGeneration::ref_processor_init()
 const char* ParNewGeneration::name() const {
   return "par new generation";
 }
+
+bool ParNewGeneration::in_use() {
+  return UseParNewGC && ParallelGCThreads > 0;
+}
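The flag combination that call sites previously spelled out by hand is now owned by one predicate. An illustrative standalone mock of the pattern (UseParNewGC and ParallelGCThreads stand in for the real VM flags):

    #include <cstdio>

    static bool     UseParNewGC       = true;
    static unsigned ParallelGCThreads = 4;

    struct ParNewGeneration {
      // Mirrors the new helper: one place owns the flag combination.
      static bool in_use() { return UseParNewGC && ParallelGCThreads > 0; }
    };

    int main() {
      if (ParNewGeneration::in_use()) {
        std::printf("ParNew:CMS counters, parallel young gen\n");
      } else {
        std::printf("Copy:CMS counters, serial young gen\n");
      }
    }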
@@ -350,6 +350,8 @@ class ParNewGeneration: public DefNewGeneration {
     delete _task_queues;
   }
 
+  static bool in_use();
+
   virtual void ref_processor_init();
   virtual Generation::Name kind() { return Generation::ParNew; }
   virtual const char* name() const;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -242,7 +242,11 @@ class UpdateDensePrefixTask : public GCTask {
 //
 
 class DrainStacksCompactionTask : public GCTask {
+  uint _stack_index;
+  uint stack_index() { return _stack_index; }
  public:
+  DrainStacksCompactionTask(uint stack_index) : GCTask(),
+                                                _stack_index(stack_index) {};
   char* name() { return (char *)"drain-region-task"; }
   virtual void do_it(GCTaskManager* manager, uint which);
 };

@@ -2449,7 +2449,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
 
   const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
   for (unsigned int j = 0; j < task_count; j++) {
-    q->enqueue(new DrainStacksCompactionTask());
+    q->enqueue(new DrainStacksCompactionTask(j));
   }
 
   // Find all regions that are available (can be filled immediately) and
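Each draining task now carries the index of the region stack it should work on, fixed at enqueue time, instead of every task deriving it from the executing worker. A standalone sketch of that shape (DrainTask and the queue are simplified mocks, not the GCTask machinery):

    #include <cstdio>
    #include <vector>

    // The task records its stack index; the worker id that eventually
    // runs it need not be equal to that index.
    struct DrainTask {
      unsigned _stack_index;
      explicit DrainTask(unsigned stack_index) : _stack_index(stack_index) {}
      void do_it(unsigned which_worker) {
        std::printf("worker %u drains stack %u\n", which_worker, _stack_index);
      }
    };

    int main() {
      const unsigned task_count = 4;  // MAX2(parallel_gc_threads, 1U) above
      std::vector<DrainTask> q;
      for (unsigned j = 0; j < task_count; j++) q.emplace_back(j);
      for (unsigned j = 0; j < task_count; j++) q[j].do_it(j);
    }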
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,9 @@ size_t CollectedHeap::_filler_array_max_size = 0;
 
 // Memory state functions.
 
-CollectedHeap::CollectedHeap()
+CollectedHeap::CollectedHeap() : _n_par_threads(0)
+
 {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
@@ -59,6 +59,8 @@ class CollectedHeap : public CHeapObj {
   MemRegion _reserved;
   BarrierSet* _barrier_set;
   bool _is_gc_active;
+  int _n_par_threads;
+
   unsigned int _total_collections;          // ... started
   unsigned int _total_full_collections;     // ... started
   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
@@ -293,6 +295,12 @@ class CollectedHeap : public CHeapObj {
   }
   GCCause::Cause gc_cause() { return _gc_cause; }
 
+  // Number of threads currently working on GC tasks.
+  int n_par_threads() { return _n_par_threads; }
+
+  // May be overridden to set additional parallelism.
+  virtual void set_par_threads(int t) { _n_par_threads = t; };
+
   // Preload classes into the shared portion of the heap, and then dump
   // that data to a file so that it can be loaded directly by another
   // VM (then terminate).
@@ -606,6 +614,14 @@ class CollectedHeap : public CHeapObj {
     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
   }
 #endif
 
+ public:
+  // This is a convenience method that is used in cases where
+  // the actual number of GC worker threads is not pertinent but
+  // only whether there are more than 0.  Use of this method helps
+  // reduce the occurrence of ParallelGCThreads to uses where the
+  // actual number may be germane.
+  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
 };
 
 // Class to set and reset the GC cause for a CollectedHeap.
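This is the heart of the change: the static predicate answers only "is parallelism configured at all?", while the per-phase count of active workers lives in _n_par_threads. A standalone sketch of the split (Heap is a mock, ParallelGCThreads a stand-in for the VM flag):

    #include <cassert>

    static unsigned ParallelGCThreads = 8;  // stand-in for the VM flag

    class Heap {
      int _n_par_threads = 0;
    public:
      // "Any parallelism at all?" -- static, flag-derived.
      static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
      // "How many workers right now?" -- set per GC phase.
      int  n_par_threads() const { return _n_par_threads; }
      virtual void set_par_threads(int t) { _n_par_threads = t; }
      virtual ~Heap() {}
    };

    int main() {
      Heap h;
      if (Heap::use_parallel_gc_threads()) {
        h.set_par_threads(4);   // this phase runs with 4 of the 8 workers
      }
      assert(h.n_par_threads() <= (int)ParallelGCThreads);
    }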
@@ -4721,6 +4721,7 @@ workgroup.cpp allocation.inline.hpp
 workgroup.cpp os.hpp
 workgroup.cpp workgroup.hpp
 
+workgroup.hpp taskqueue.hpp
 workgroup.hpp thread_<os_family>.inline.hpp
 
 xmlstream.cpp allocation.hpp
@@ -676,7 +676,7 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
 
 void GenCollectedHeap::set_par_threads(int t) {
   SharedHeap::set_par_threads(t);
-  _gen_process_strong_tasks->set_par_threads(t);
+  _gen_process_strong_tasks->set_n_threads(t);
 }
 
 class AssertIsPermClosure: public OopClosure {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,6 +74,7 @@ public:
   // Data structure for claiming the (potentially) parallel tasks in
   // (gen-specific) strong roots processing.
   SubTasksDone* _gen_process_strong_tasks;
+  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
 
   // In block contents verification, the number of header words to skip
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,16 +137,17 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
   _discovery_is_atomic = atomic_discovery;
   _discovery_is_mt     = mt_discovery;
   _num_q               = mt_degree;
-  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
+  _max_num_q           = mt_degree;
+  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
   if (_discoveredSoftRefs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
-  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
-  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
-  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
+  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
+  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
+  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
   assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
   // Initialized all entries to _sentinelRef
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     _discoveredSoftRefs[i].set_head(sentinel_ref());
     _discoveredSoftRefs[i].set_length(0);
   }
@@ -159,7 +160,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     guarantee(_discoveredSoftRefs[i].empty(),
               "Found non-empty discovered list");
   }
@@ -167,7 +168,11 @@ void ReferenceProcessor::verify_no_references_recorded() {
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+  // Should this instead be
+  // for (int i = 0; i < subclasses_of_ref; i++) {
+  //   for (int j = 0; j < _num_q; j++) {
+  //     int index = i * _max_num_q + j;
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
     } else {
@@ -395,7 +400,15 @@ public:
     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
     int index = work_id;
-    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
+    // The increment on "index" must correspond to the maximum number of queues
+    // (n_queues) with which that ReferenceProcessor was created.  That
+    // is because of the "clever" way the discovered references lists were
+    // allocated and are indexed into.  That number is ParallelGCThreads
+    // currently.  Assert that.
+    assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
+    for (int j = 0;
+         j < subclasses_of_ref;
+         j++, index += _n_queues) {
       _ref_processor.enqueue_discovered_reflist(
         _refs_lists[index], _pending_list_addr);
       _refs_lists[index].set_head(_sentinel_ref);
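The "clever" layout the comment refers to: the discovered lists form one flat array of subclasses_of_ref blocks, each _max_num_q wide, so a worker's list for a given subclass lives at subclass * _max_num_q + worker. A self-checking sketch with illustrative mock values:

    #include <cassert>

    int main() {
      const int subclasses_of_ref = 4;   // Soft, Weak, Final, Phantom
      const int max_num_q = 8;           // ParallelGCThreads at construction
      const int work_id = 3;             // this worker's queue

      // Walking the flat array with stride max_num_q visits exactly
      // this worker's list in every subclass block.
      int index = work_id;
      for (int j = 0; j < subclasses_of_ref; j++, index += max_num_q) {
        assert(index == j * max_num_q + work_id);
      }
    }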
@@ -410,11 +423,11 @@ void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
-                           pending_list_addr, sentinel_ref(), _num_q);
+                           pending_list_addr, sentinel_ref(), _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
       _discoveredSoftRefs[i].set_head(sentinel_ref());
       _discoveredSoftRefs[i].set_length(0);
@@ -614,8 +627,9 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
   complete_gc->do_void();
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
-      gclog_or_tty->print(" Dropped %d dead Refs out of %d "
-        "discovered Refs by policy ", iter.removed(), iter.processed());
+      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
+        "discovered Refs by policy list " INTPTR_FORMAT,
+        iter.removed(), iter.processed(), (address)refs_list.head());
     }
   )
 }
@@ -651,8 +665,9 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
   }
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
-      gclog_or_tty->print(" Dropped %d active Refs out of %d "
-        "Refs in discovered list ", iter.removed(), iter.processed());
+      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
+        "Refs in discovered list " INTPTR_FORMAT,
+        iter.removed(), iter.processed(), (address)refs_list.head());
     }
   )
 }
@@ -689,8 +704,9 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
   complete_gc->do_void();
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
-      gclog_or_tty->print(" Dropped %d active Refs out of %d "
-        "Refs in discovered list ", iter.removed(), iter.processed());
+      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
+        "Refs in discovered list " INTPTR_FORMAT,
+        iter.removed(), iter.processed(), (address)refs_list.head());
     }
   )
 }
@@ -704,6 +720,7 @@ ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
                                    BoolObjectClosure* is_alive,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
+  ResourceMark rm;
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.update_discovered();
@@ -743,8 +760,8 @@ ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
 
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
-    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nAbandoning %s discovered list",
         list_name(i));
@@ -766,7 +783,9 @@ public:
                     OopClosure& keep_alive,
                     VoidClosure& complete_gc)
   {
-    _ref_processor.process_phase1(_refs_lists[i], _policy,
+    Thread* thr = Thread::current();
+    int refs_list_index = ((WorkerThread*)thr)->id();
+    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                   &is_alive, &keep_alive, &complete_gc);
   }
 private:
@@ -802,6 +821,11 @@ public:
                     OopClosure& keep_alive,
                     VoidClosure& complete_gc)
   {
+    // Don't use "refs_list_index" calculated in this way because
+    // balance_queues() has moved the Ref's into the first n queues.
+    // Thread* thr = Thread::current();
+    // int refs_list_index = ((WorkerThread*)thr)->id();
+    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
     _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                   &is_alive, &keep_alive, &complete_gc);
   }
@@ -810,23 +834,47 @@ private:
 };
 
 // Balances reference queues.
+// Move entries from all queues[0, 1, ..., _max_num_q-1] to
+// queues[0, 1, ..., _num_q-1] because only the first _num_q
+// corresponding to the active workers will be processed.
 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
 {
   // calculate total length
   size_t total_refs = 0;
-  for (int i = 0; i < _num_q; ++i) {
+  if (TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr("\nBalance ref_lists ");
+  }
+
+  for (int i = 0; i < _max_num_q; ++i) {
     total_refs += ref_lists[i].length();
+    if (TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print("%d ", ref_lists[i].length());
+    }
+  }
+  if (TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr(" = %d", total_refs);
   }
   size_t avg_refs = total_refs / _num_q + 1;
   int to_idx = 0;
-  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
-    while (ref_lists[from_idx].length() > avg_refs) {
+  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
+    bool move_all = false;
+    if (from_idx >= _num_q) {
+      move_all = ref_lists[from_idx].length() > 0;
+    }
+    while ((ref_lists[from_idx].length() > avg_refs) ||
+           move_all) {
       assert(to_idx < _num_q, "Sanity Check!");
       if (ref_lists[to_idx].length() < avg_refs) {
         // move superfluous refs
-        size_t refs_to_move =
-          MIN2(ref_lists[from_idx].length() - avg_refs,
-               avg_refs - ref_lists[to_idx].length());
+        size_t refs_to_move;
+        // Move all the Ref's if the from queue will not be processed.
+        if (move_all) {
+          refs_to_move = MIN2(ref_lists[from_idx].length(),
+                              avg_refs - ref_lists[to_idx].length());
+        } else {
+          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
+                              avg_refs - ref_lists[to_idx].length());
+        }
         oop move_head = ref_lists[from_idx].head();
         oop move_tail = move_head;
         oop new_head = move_head;
@@ -840,11 +888,35 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
         ref_lists[to_idx].inc_length(refs_to_move);
         ref_lists[from_idx].set_head(new_head);
         ref_lists[from_idx].dec_length(refs_to_move);
+        if (ref_lists[from_idx].length() == 0) {
+          break;
+        }
       } else {
-        ++to_idx;
+        to_idx = (to_idx + 1) % _num_q;
       }
     }
   }
+#ifdef ASSERT
+  size_t balanced_total_refs = 0;
+  for (int i = 0; i < _max_num_q; ++i) {
+    balanced_total_refs += ref_lists[i].length();
+    if (TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print("%d ", ref_lists[i].length());
+    }
+  }
+  if (TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
+    gclog_or_tty->flush();
+  }
+  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
+#endif
+}
+
+void ReferenceProcessor::balance_all_queues() {
+  balance_queues(_discoveredSoftRefs);
+  balance_queues(_discoveredWeakRefs);
+  balance_queues(_discoveredFinalRefs);
+  balance_queues(_discoveredPhantomRefs);
 }
 
 void
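The balancing loop above must drain every queue at index >= _num_q (they will not be processed) while conserving the total number of Refs. A runnable toy model of the same logic, useful for convincing oneself of the conservation and draining invariants; queue lengths stand in for the linked lists, and all names are illustrative:

    #include <algorithm>
    #include <cassert>
    #include <numeric>
    #include <vector>

    // Move entries from queues [0, max_num_q) onto the first num_q queues.
    void balance(std::vector<size_t>& len, size_t num_q) {
      size_t total = std::accumulate(len.begin(), len.end(), size_t(0));
      size_t avg = total / num_q + 1;
      size_t to = 0;
      for (size_t from = 0; from < len.size(); from++) {
        bool move_all = from >= num_q && len[from] > 0;
        while (len[from] > avg || move_all) {
          if (len[to] < avg) {
            size_t n = move_all ? std::min(len[from], avg - len[to])
                                : std::min(len[from] - avg, avg - len[to]);
            len[from] -= n; len[to] += n;
            if (len[from] == 0) break;     // the new early-out above
          } else {
            to = (to + 1) % num_q;         // wrap, as in the fixed ++to_idx
          }
        }
      }
      // Balancing must be complete: nothing created or lost.
      assert(total == std::accumulate(len.begin(), len.end(), size_t(0)));
    }

    int main() {
      std::vector<size_t> len = {10, 0, 7, 0, 5, 9, 0, 2}; // max_num_q == 8
      balance(len, 3);                                     // num_q == 3 active
      for (size_t i = 3; i < len.size(); i++) assert(len[i] == 0);
    }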
@@ -857,8 +929,17 @@ ReferenceProcessor::process_discovered_reflist(
   VoidClosure*                 complete_gc,
   AbstractRefProcTaskExecutor* task_executor)
 {
-  bool mt = task_executor != NULL && _processing_is_mt;
-  if (mt && ParallelRefProcBalancingEnabled) {
+  bool mt_processing = task_executor != NULL && _processing_is_mt;
+  // If discovery used MT and a dynamic number of GC threads, then
+  // the queues must be balanced for correctness if fewer than the
+  // maximum number of queues were used.  The number of queues used
+  // during discovery may be different than the number to be used
+  // for processing so don't depend on _num_q < _max_num_q as part
+  // of the test.
+  bool must_balance = _discovery_is_mt;
+
+  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
+      must_balance) {
     balance_queues(refs_lists);
   }
   if (PrintReferenceGC && PrintGCDetails) {
@@ -875,7 +956,7 @@ ReferenceProcessor::process_discovered_reflist(
   // policy reasons. Keep alive the transitive closure of all
   // such referents.
   if (policy != NULL) {
-    if (mt) {
+    if (mt_processing) {
       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
       task_executor->execute(phase1);
     } else {
@@ -891,7 +972,7 @@ ReferenceProcessor::process_discovered_reflist(
 
   // Phase 2:
   // . Traverse the list and remove any refs whose referents are alive.
-  if (mt) {
+  if (mt_processing) {
     RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
     task_executor->execute(phase2);
   } else {
@@ -902,7 +983,7 @@ ReferenceProcessor::process_discovered_reflist(
 
   // Phase 3:
   // . Traverse the list and process referents as appropriate.
-  if (mt) {
+  if (mt_processing) {
     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
     task_executor->execute(phase3);
   } else {
@@ -915,7 +996,11 @@ ReferenceProcessor::process_discovered_reflist(
 
 void ReferenceProcessor::clean_up_discovered_references() {
   // loop over the lists
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+  // Should this instead be
+  // for (int i = 0; i < subclasses_of_ref; i++) {
+  //   for (int j = 0; j < _num_q; j++) {
+  //     int index = i * _max_num_q + j;
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
@@ -976,7 +1061,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
       id = next_id();
     }
   }
-  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");
+  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");
 
   // Get the discovered queue to which we will add
   DiscoveredList* list = NULL;
@@ -1001,6 +1086,10 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
     default:
       ShouldNotReachHere();
   }
+  if (TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
+                           id, list);
+  }
   return list;
 }
 
@@ -1243,7 +1332,7 @@ void ReferenceProcessor::preclean_discovered_references(
   {
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1340,15 +1429,16 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
 
   NOT_PRODUCT(
     if (PrintGCDetails && PrintReferenceGC) {
-      gclog_or_tty->print(" Dropped %d Refs out of %d "
-        "Refs in discovered list ", iter.removed(), iter.processed());
+      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
+        "Refs in discovered list " INTPTR_FORMAT,
+        iter.removed(), iter.processed(), (address)refs_list.head());
     }
   )
 }
 
 const char* ReferenceProcessor::list_name(int i) {
-  assert(i >= 0 && i <= _num_q * subclasses_of_ref, "Out of bounds index");
-  int j = i / _num_q;
+  assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
+  int j = i / _max_num_q;
   switch (j) {
     case 0: return "SoftRef";
     case 1: return "WeakRef";
@@ -1372,7 +1462,7 @@ void ReferenceProcessor::verify() {
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     oop obj = _discoveredSoftRefs[i].head();
     while (obj != sentinel_ref()) {
       oop next = java_lang_ref_Reference::discovered(obj);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,8 +85,10 @@ class ReferenceProcessor : public CHeapObj {
 
   // The discovered ref lists themselves
 
-  // The MT'ness degree of the queues below
+  // The active MT'ness degree of the queues below
   int             _num_q;
+  // The maximum MT'ness degree of the queues below
+  int             _max_num_q;
   // Arrays of lists of oops, one per thread
   DiscoveredList* _discoveredSoftRefs;
   DiscoveredList* _discoveredWeakRefs;
@@ -95,6 +97,7 @@ class ReferenceProcessor : public CHeapObj {
 
  public:
   int num_q() { return _num_q; }
+  void set_mt_degree(int v) { _num_q = v; }
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
   static oop sentinel_ref() { return _sentinelRef; }
   static oop* adr_sentinel_ref() { return &_sentinelRef; }
@@ -244,6 +247,7 @@ class ReferenceProcessor : public CHeapObj {
     _bs(NULL),
     _is_alive_non_header(NULL),
     _num_q(0),
+    _max_num_q(0),
     _processing_is_mt(false),
     _next_id(0)
   {}
@@ -312,6 +316,9 @@ class ReferenceProcessor : public CHeapObj {
   void weak_oops_do(OopClosure* f);        // weak roots
   static void oops_do(OopClosure* f);      // strong root(s)
 
+  // Balance each of the discovered lists.
+  void balance_all_queues();
+
   // Discover a Reference object, using appropriate discovery criteria
   bool discover_reference(oop obj, ReferenceType rt);
 
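The header now separates the active degree (_num_q, lowered per cycle via set_mt_degree) from the maximum degree (_max_num_q, which sizes the list array). A standalone mock of that split; the class below only mirrors the accessors, it is not the real ReferenceProcessor:

    #include <cassert>

    class RefProcMock {
      int _num_q;        // active MT'ness degree
      int _max_num_q;    // maximum MT'ness degree (sizes the list array)
    public:
      explicit RefProcMock(int mt_degree)
        : _num_q(mt_degree), _max_num_q(mt_degree) {}
      int  num_q() const { return _num_q; }
      void set_mt_degree(int v) { assert(v <= _max_num_q); _num_q = v; }
    };

    int main() {
      RefProcMock rp(8);     // built for 8 queues (ParallelGCThreads)
      rp.set_mt_degree(3);   // this cycle only 3 workers are active
      assert(rp.num_q() == 3);
    }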
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,8 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
   _perm_gen(NULL), _rem_set(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
-  _workers(NULL), _n_par_threads(0)
+  _n_par_threads(0),
+  _workers(NULL)
 {
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
@@ -60,11 +61,13 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
       (UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
        UseG1GC) &&
       ParallelGCThreads > 0) {
-    _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
+    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                             /* are_GC_task_threads */true,
                             /* are_ConcurrentGC_threads */false);
     if (_workers == NULL) {
       vm_exit_during_initialization("Failed necessary allocation.");
+    } else {
+      _workers->initialize_workers();
     }
   }
 }
@@ -77,8 +80,9 @@ bool SharedHeap::heap_lock_held_for_gc() {
 }
 
 void SharedHeap::set_par_threads(int t) {
+  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
   _n_par_threads = t;
-  _process_strong_tasks->set_par_threads(t);
+  _process_strong_tasks->set_n_threads(t);
 }
 
 class AssertIsPermClosure: public OopClosure {
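Note the two-phase setup above: the gang is allocated first and its worker threads are spawned by an explicit initialize_workers() call, where the old WorkGang constructor did both. A minimal sketch of the pattern (FlexGang is a mock; the NULL check mirrors HotSpot's allocator conventions, hence the nothrow new):

    #include <cstdio>
    #include <new>

    class FlexGang {
    public:
      bool initialize_workers() {        // may fail; caller must check/abort
        std::printf("spawning workers\n");
        return true;
      }
    };

    int main() {
      FlexGang* workers = new (std::nothrow) FlexGang();
      if (workers == NULL) {
        std::printf("Failed necessary allocation.\n");
        return 1;
      } else {
        workers->initialize_workers();   // was implicit in the old constructor
      }
      delete workers;
    }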
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@ class OopsInGenClosure;
 class ObjectClosure;
 class SubTasksDone;
 class WorkGang;
+class FlexibleWorkGang;
 class CollectorPolicy;
 class KlassHandle;
 
@@ -74,7 +75,7 @@ protected:
   int _strong_roots_parity;
 
   // If we're doing parallel GC, use this gang of threads.
-  WorkGang* _workers;
+  FlexibleWorkGang* _workers;
 
   // Number of parallel threads currently working on GC tasks.
   // O indicates use sequential code; 1 means use parallel code even with
@@ -189,7 +190,7 @@ public:
     SO_CodeCache       = 0x10
   };
 
-  WorkGang* workers() const { return _workers; }
+  FlexibleWorkGang* workers() const { return _workers; }
 
   // Sets the number of parallel threads that will be doing tasks
   // (such as process strong roots) subsequently.
@@ -144,6 +144,7 @@ void ParallelTaskTerminator::sleep(uint millis) {
 
 bool
 ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
+  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  Atomic::inc(&_offered_termination);
 
@@ -255,3 +256,9 @@ bool ObjArrayTask::is_valid() const {
       _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
+
+void ParallelTaskTerminator::reset_for_reuse(int n_threads) {
+  reset_for_reuse();
+  _n_threads = n_threads;
+}
+
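The new reset_for_reuse(int) overload lets one terminator object be re-armed for a different worker count before each parallel phase, which is what the new assert in offer_termination() guards. A simplified standalone mock of the pattern (no atomics; the real class synchronizes _offered_termination):

    #include <cassert>

    class Terminator {
      int _n_threads;
      int _offered_termination = 0;
    public:
      explicit Terminator(int n) : _n_threads(n) {}
      void reset_for_reuse() { _offered_termination = 0; }
      void reset_for_reuse(int n_threads) {   // the new overload
        reset_for_reuse();
        _n_threads = n_threads;
      }
      void offer_termination() {
        assert(_n_threads > 0 && "Initialization is incorrect");
        ++_offered_termination;
      }
    };

    int main() {
      Terminator t(0);        // constructed before the worker count is known
      t.reset_for_reuse(4);   // armed for 4 active workers this phase
      t.offer_termination();
    }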
@@ -305,6 +305,12 @@ bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
   return false;
 }
 
+// pop_local_slow() is done by the owning thread and is trying to
+// get the last task in the queue.  It will compete with pop_global()
+// that will be used by other threads.  The tag age is incremented
+// whenever the queue goes empty which it will do here if this thread
+// gets the last task or in pop_global() if the queue wraps (top == 0
+// and pop_global() succeeds, see pop_global()).
 template<class E, unsigned int N>
 bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
@@ -637,6 +643,9 @@ public:
   // in an MT-safe manner, once the previous round of use of
   // the terminator is finished.
   void reset_for_reuse();
+  // Same as above but the number of parallel threads is set to the
+  // given number.
+  void reset_for_reuse(int n_threads);
 
 #ifdef TRACESPINNING
   static uint total_yields() { return _total_yields; }
@@ -782,3 +791,4 @@ typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
 
 typedef OverflowTaskQueue<size_t> RegionTaskQueue;
 typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
+
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,28 +53,52 @@ WorkGang::WorkGang(const char* name,
                    int           workers,
                    bool          are_GC_task_threads,
                    bool          are_ConcurrentGC_threads) :
-  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads)
-{
+  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
   // Save arguments.
   _total_workers = workers;
+}
+
+GangWorker* WorkGang::allocate_worker(int which) {
+  GangWorker* new_worker = new GangWorker(this, which);
+  return new_worker;
+}
+
+// The current implementation will exit if the allocation
+// of any worker fails.  Still, return a boolean so that
+// a future implementation can possibly do a partial
+// initialization of the workers and report such to the
+// caller.
+bool WorkGang::initialize_workers() {
+
   if (TraceWorkGang) {
-    tty->print_cr("Constructing work gang %s with %d threads", name, workers);
+    tty->print_cr("Constructing work gang %s with %d threads",
+                  name(),
+                  total_workers());
   }
-  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, workers);
+  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers());
   if (gang_workers() == NULL) {
     vm_exit_out_of_memory(0, "Cannot create GangWorker array.");
+    return false;
+  }
+  os::ThreadType worker_type;
+  if (are_ConcurrentGC_threads()) {
+    worker_type = os::cgc_thread;
+  } else {
+    worker_type = os::pgc_thread;
   }
   for (int worker = 0; worker < total_workers(); worker += 1) {
-    GangWorker* new_worker = new GangWorker(this, worker);
+    GangWorker* new_worker = allocate_worker(worker);
     assert(new_worker != NULL, "Failed to allocate GangWorker");
     _gang_workers[worker] = new_worker;
-    if (new_worker == NULL || !os::create_thread(new_worker, os::pgc_thread))
+    if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
       vm_exit_out_of_memory(0, "Cannot create worker GC thread. Out of system resources.");
+      return false;
+    }
     if (!DisableStartThread) {
       os::start_thread(new_worker);
     }
   }
+  return true;
 }
 
 AbstractWorkGang::~AbstractWorkGang() {
@@ -383,7 +407,7 @@ bool SubTasksDone::valid() {
   return _tasks != NULL;
 }
 
-void SubTasksDone::set_par_threads(int t) {
+void SubTasksDone::set_n_threads(int t) {
 #ifdef ASSERT
   assert(_claimed == 0 || _threads_completed == _n_threads,
          "should not be called while tasks are being processed!");
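Splitting worker creation out of the constructor into allocate_worker() plus initialize_workers() is a factory-method refactor: the base gang owns the spawning loop, and a subclass overrides only which worker type gets built. A minimal standalone sketch of that shape (mock types, worker counts hard-coded):

    #include <cstdio>

    struct Worker { int id; };

    class Gang {
    public:
      virtual ~Gang() {}
      // The factory hook a derived gang can override.
      virtual Worker* allocate_worker(int which) { return new Worker{which}; }
      bool initialize_workers() {
        for (int w = 0; w < 2; w++) {        // total_workers() elided
          Worker* nw = allocate_worker(w);   // virtual dispatch per worker
          std::printf("made worker %d\n", nw->id);
          delete nw;
        }
        return true;
      }
    };

    struct YieldingGang : Gang {
      Worker* allocate_worker(int which) override {
        std::printf("yielding flavor\n");
        return new Worker{which};
      }
    };

    int main() {
      YieldingGang g;
      g.initialize_workers();   // creates yielding workers via the override
    }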
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@ class GangWorker;
 class YieldingFlexibleGangWorker;
 class YieldingFlexibleGangTask;
 class WorkData;
+class AbstractWorkGang;
 
 // An abstract task to be worked on by a gang.
 // You subclass this to supply your own work() method
@@ -38,6 +39,13 @@ public:
   // The argument tells you which member of the gang you are.
   virtual void work(int i) = 0;
 
+  // This method configures the task for proper termination.
+  // Some tasks do not have any requirements on termination
+  // and may inherit this method that does nothing.  Some
+  // tasks do some coordination on termination and override
+  // this method to implement that coordination.
+  virtual void set_for_termination(int active_workers) {};
+
   // Debugging accessor for the name.
   const char* name() const PRODUCT_RETURN_(return NULL;);
   int counter() { return _counter; }
@@ -64,6 +72,18 @@ protected:
   virtual ~AbstractGangTask() { }
 };
 
+class AbstractGangTaskWOopQueues : public AbstractGangTask {
+  OopTaskQueueSet*       _queues;
+  ParallelTaskTerminator _terminator;
+ public:
+  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues) :
+    AbstractGangTask(name), _queues(queues), _terminator(0, _queues) {}
+  ParallelTaskTerminator* terminator() { return &_terminator; }
+  virtual void set_for_termination(int active_workers) {
+    terminator()->reset_for_reuse(active_workers);
+  }
+  OopTaskQueueSet* queues() { return _queues; }
+};
 
 // Class AbstractWorkGang:
 // An abstract class representing a gang of workers.
@@ -114,6 +134,9 @@ public:
   int total_workers() const {
     return _total_workers;
   }
+  virtual int active_workers() const {
+    return _total_workers;
+  }
   bool terminate() const {
     return _terminate;
   }
@@ -199,6 +222,13 @@ public:
            bool are_GC_task_threads, bool are_ConcurrentGC_threads);
   // Run a task, returns when the task is done (or terminated).
   virtual void run_task(AbstractGangTask* task);
+  void run_task(AbstractGangTask* task, uint no_of_parallel_workers);
+  // Allocate a worker and return a pointer to it.
+  virtual GangWorker* allocate_worker(int which);
+  // Initialize workers in the gang.  Return true if initialization
+  // succeeded. The type of the worker can be overridden in a derived
+  // class with the appropriate implementation of allocate_worker().
+  bool initialize_workers();
 };
 
 // Class GangWorker:
@@ -226,6 +256,34 @@ public:
   AbstractWorkGang* gang() const { return _gang; }
 };
 
+class FlexibleWorkGang: public WorkGang {
+ protected:
+  int _active_workers;
+ public:
+  // Constructor and destructor.
+  FlexibleWorkGang(const char* name, int workers,
+                   bool are_GC_task_threads,
+                   bool are_ConcurrentGC_threads) :
+    WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads) {
+    _active_workers = ParallelGCThreads;
+  };
+  // Accessors for fields
+  virtual int active_workers() const { return _active_workers; }
+  void set_active_workers(int v) { _active_workers = v; }
+};
+
+// Work gangs in garbage collectors: 2009-06-10
+//
+// SharedHeap - work gang for stop-the-world parallel collection.
+//   Used by
+//     ParNewGeneration
+//     CMSParRemarkTask
+//     CMSRefProcTaskExecutor
+//     G1CollectedHeap
+//     G1ParFinalCountTask
+// ConcurrentMark
+// CMSCollector
+
 // A class that acts as a synchronisation barrier. Workers enter
 // the barrier and must wait until all other workers have entered
 // before any of them may leave.
@@ -271,7 +329,7 @@ class SubTasksDone: public CHeapObj {
   int _n_threads;
   jint _threads_completed;
 #ifdef ASSERT
-  jint _claimed;
+  volatile jint _claimed;
 #endif
 
   // Set all tasks to unclaimed.
@@ -286,9 +344,10 @@ public:
   // True iff the object is in a valid state.
   bool valid();
 
-  // Set the number of parallel threads doing the tasks to "t".  Can only
+  // Get/set the number of parallel threads doing the tasks to "t".  Can only
   // be called before tasks start or after they are complete.
-  void set_par_threads(int t);
+  int n_threads() { return _n_threads; }
+  void set_n_threads(int t);
 
   // Returns "false" if the task "t" is unclaimed, and ensures that task is
   // claimed.  The task "t" is required to be within the range of "this".
@@ -315,13 +374,17 @@ class SequentialSubTasksDone : public StackObj {
 protected:
   jint _n_tasks;     // Total number of tasks available.
   jint _n_claimed;   // Number of tasks claimed.
+  // _n_threads is used to determine when a sub task is done.
+  // See comments on SubTasksDone::_n_threads
   jint _n_threads;   // Total number of parallel threads.
   jint _n_completed; // Number of completed threads.
 
   void clear();
 
 public:
-  SequentialSubTasksDone() { clear(); }
+  SequentialSubTasksDone() {
+    clear();
+  }
   ~SequentialSubTasksDone() {}
 
   // True iff the object is in a valid state.
@@ -330,11 +393,12 @@ public:
   // number of tasks
   jint n_tasks() const { return _n_tasks; }
 
-  // Set the number of parallel threads doing the tasks to t.
+  // Get/set the number of parallel threads doing the tasks to t.
   // Should be called before the task starts but it is safe
   // to call this once a task is running provided that all
   // threads agree on the number of threads.
-  void set_par_threads(int t) { _n_threads = t; }
+  int n_threads() { return _n_threads; }
+  void set_n_threads(int t) { _n_threads = t; }
 
   // Set the number of tasks to be claimed to t. As above,
   // should be called before the tasks start but it is safe
|
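The same get/set split appears here, and the added comment cross-references SubTasksDone::_n_threads: both classes use the agreed thread count to decide when the last worker has finished. A minimal sketch of that idea (hypothetical names, std::atomic standing in for HotSpot's jint/Atomic machinery):

    #include <atomic>

    // Rough model of SequentialSubTasksDone: tasks are handed out in
    // order, and the last of n_threads workers to drain the task list
    // can detect that fact and reset shared state for reuse. The real
    // class additionally guards reuse with clear(), shown above.
    class SeqSubTasksModel {
      std::atomic<int> _n_claimed;
      std::atomic<int> _n_completed;
      int _n_tasks;
      int _n_threads;
    public:
      SeqSubTasksModel(int tasks, int threads)
          : _n_claimed(0), _n_completed(0),
            _n_tasks(tasks), _n_threads(threads) {}

      // Hand the caller the next unclaimed task index, if any remain.
      bool try_claim_task(int& t) {
        t = _n_claimed.fetch_add(1);
        return t < _n_tasks;
      }

      // Called once per worker when it runs out of tasks; true only
      // for the last worker, which may then safely reset the object.
      bool all_tasks_completed() {
        return _n_completed.fetch_add(1) + 1 == _n_threads;
      }
    };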
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,29 +32,13 @@ class WorkData;
 
 YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
     const char* name, int workers, bool are_GC_task_threads) :
-  AbstractWorkGang(name, are_GC_task_threads, false) {
-  // Save arguments.
-  _total_workers = workers;
-  assert(_total_workers > 0, "Must have more than 1 worker");
-
-  _yielded_workers = 0;
-
-  if (TraceWorkGang) {
-    tty->print_cr("Constructing work gang %s with %d threads", name, workers);
-  }
-  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, workers);
-  assert(gang_workers() != NULL, "Failed to allocate gang workers");
-  for (int worker = 0; worker < total_workers(); worker += 1) {
-    YieldingFlexibleGangWorker* new_worker =
-      new YieldingFlexibleGangWorker(this, worker);
-    assert(new_worker != NULL, "Failed to allocate YieldingFlexibleGangWorker");
-    _gang_workers[worker] = new_worker;
-    if (new_worker == NULL || !os::create_thread(new_worker, os::pgc_thread))
-      vm_exit_out_of_memory(0, "Cannot create worker GC thread. Out of system resources.");
-    if (!DisableStartThread) {
-      os::start_thread(new_worker);
-    }
-  }
+  FlexibleWorkGang(name, workers, are_GC_task_threads, false),
+    _yielded_workers(0) {}
+
+GangWorker* YieldingFlexibleWorkGang::allocate_worker(int which) {
+  YieldingFlexibleGangWorker* new_member =
+      new YieldingFlexibleGangWorker(this, which);
+  return (YieldingFlexibleGangWorker*) new_member;
 }
 
 // Run a task; returns when the task is done, or the workers yield,
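The rewritten constructor delegates worker construction to the allocate_worker() virtual, presumably so that the shared creation and thread-start loop deleted here can live once in the FlexibleWorkGang/AbstractWorkGang base (that path is not shown in this section). A sketch of the factory-method shape this moves to; names here are simplified stand-ins, and the separate initialize() phase is our assumption, since a C++ base-class constructor cannot dispatch to a subclass override:

    #include <cstddef>
    #include <vector>

    struct Worker {
      explicit Worker(int id) : _id(id) {}
      virtual ~Worker() {}
      int _id;
    };

    class Gang {
      std::vector<Worker*> _workers;
    public:
      virtual ~Gang() {
        for (std::size_t i = 0; i < _workers.size(); i++) delete _workers[i];
      }

      // Subclasses decide which concrete worker type to construct.
      virtual Worker* allocate_worker(int which) = 0;

      // Shared creation loop; run after construction because virtual
      // dispatch is unavailable inside the base-class constructor.
      void initialize(int n) {
        for (int i = 0; i < n; i++) {
          _workers.push_back(allocate_worker(i));
        }
      }
    };

    struct YieldingWorker : Worker {
      explicit YieldingWorker(int id) : Worker(id) {}
    };

    class YieldingGang : public Gang {
    public:
      virtual Worker* allocate_worker(int which) {
        return new YieldingWorker(which);
      }
    };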
@@ -142,6 +126,7 @@ void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
     _active_workers = total_workers();
   }
   new_task->set_actual_size(_active_workers);
+  new_task->set_for_termination(_active_workers);
 
   assert(_started_workers == 0, "Tabula rasa non");
   assert(_finished_workers == 0, "Tabula rasa non");
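The one functional addition in this hunk is new_task->set_for_termination(_active_workers): the task's termination protocol must be sized to the worker count the gang actually grants, or last-worker detection fires early or never. A toy counting sketch of that requirement (a hypothetical class; HotSpot's actual ParallelTaskTerminator is considerably more involved, e.g. it also peeks for stealable work):

    #include <atomic>

    // Whatever structure counts "workers done" must agree with the
    // number of workers actually started on the task.
    class Terminator {
      std::atomic<int> _finished;
      int _n_workers;
    public:
      Terminator() : _finished(0), _n_workers(0) {}

      void set_for_termination(int n) { _n_workers = n; }  // before start

      // Returns true for the last worker to arrive.
      bool offer_termination() {
        return _finished.fetch_add(1) + 1 == _n_workers;
      }
    };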
@@ -161,22 +146,22 @@ void YieldingFlexibleWorkGang::wait_for_gang() {
   for (Status status = yielding_task()->status();
        status != COMPLETED && status != YIELDED && status != ABORTED;
        status = yielding_task()->status()) {
-    assert(started_workers() <= active_workers(), "invariant");
-    assert(finished_workers() <= active_workers(), "invariant");
-    assert(yielded_workers() <= active_workers(), "invariant");
+    assert(started_workers() <= total_workers(), "invariant");
+    assert(finished_workers() <= total_workers(), "invariant");
+    assert(yielded_workers() <= total_workers(), "invariant");
     monitor()->wait(Mutex::_no_safepoint_check_flag);
   }
   switch (yielding_task()->status()) {
     case COMPLETED:
     case ABORTED: {
-      assert(finished_workers() == active_workers(), "Inconsistent status");
+      assert(finished_workers() == total_workers(), "Inconsistent status");
       assert(yielded_workers() == 0, "Invariant");
       reset();   // for next task; gang<->task binding released
       break;
     }
     case YIELDED: {
       assert(yielded_workers() > 0, "Invariant");
-      assert(yielded_workers() + finished_workers() == active_workers(),
+      assert(yielded_workers() + finished_workers() == total_workers(),
              "Inconsistent counts");
       break;
     }
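A note on the assertion changes above: the worker counts are now bounded by total_workers() rather than active_workers(). That tracks the commit's theme, the number of workers is a property of the gang rather than of the task, and for this yielding gang start_task() (previous hunk) pins _active_workers to total_workers(), so the fixed gang size is the stable invariant to check while the overseer waits.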
@@ -208,7 +193,6 @@ void YieldingFlexibleWorkGang::continue_task(
 void YieldingFlexibleWorkGang::reset() {
   _started_workers  = 0;
   _finished_workers = 0;
-  _active_workers   = 0;
   yielding_task()->set_gang(NULL);
   _task = NULL;    // unbind gang from task
 }
@@ -216,7 +200,7 @@ void YieldingFlexibleWorkGang::reset() {
 void YieldingFlexibleWorkGang::yield() {
   assert(task() != NULL, "Inconsistency; should have task binding");
   MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
-  assert(yielded_workers() < active_workers(), "Consistency check");
+  assert(yielded_workers() < total_workers(), "Consistency check");
   if (yielding_task()->status() == ABORTING) {
     // Do not yield; we need to abort as soon as possible
     // XXX NOTE: This can cause a performance pathology in the
@@ -227,7 +211,7 @@ void YieldingFlexibleWorkGang::yield() {
     // us to return at each potential yield point.
     return;
   }
-  if (++_yielded_workers + finished_workers() == active_workers()) {
+  if (++_yielded_workers + finished_workers() == total_workers()) {
     yielding_task()->set_status(YIELDED);
     monitor()->notify_all();
   } else {
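Both hunks swap active_workers() for total_workers() in the yield bookkeeping. The pattern itself, the last contributor publishes the state change and notifies, can be sketched self-contained with standard primitives (illustrative stand-ins for HotSpot's Monitor/MutexLockerEx; the class name is ours):

    #include <condition_variable>
    #include <mutex>

    // One-shot sketch of the "last one out notifies" accounting in
    // yield(): when yielded + finished workers cover the whole gang,
    // the final yielder publishes the status and wakes the overseer.
    class YieldAccounting {
      std::mutex _m;
      std::condition_variable _cv;
      int _yielded;
      int _finished;
      const int _total;
      bool _task_yielded;
    public:
      explicit YieldAccounting(int total)
          : _yielded(0), _finished(0), _total(total), _task_yielded(false) {}

      // Called by a yielding worker; blocks non-final yielders until
      // the gang-wide yield is observed (simplified relative to the
      // VM, where workers sleep until the task is continued).
      void yield() {
        std::unique_lock<std::mutex> lk(_m);
        if (++_yielded + _finished == _total) {
          _task_yielded = true;   // analogue of set_status(YIELDED)
          _cv.notify_all();       // analogue of monitor()->notify_all()
        } else {
          while (!_task_yielded) _cv.wait(lk);
        }
      }
    };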
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,25 @@ protected: // Override from parent class
   virtual void loop();
 };
 
+class FlexibleGangTask: public AbstractGangTask {
+  int _actual_size;      // size of gang obtained
+protected:
+  int _requested_size;   // size of gang requested
+public:
+  FlexibleGangTask(const char* name): AbstractGangTask(name),
+    _requested_size(0) {}
+
+  // The abstract work method.
+  // The argument tells you which member of the gang you are.
+  virtual void work(int i) = 0;
+
+  int requested_size() const { return _requested_size; }
+  int actual_size()    const { return _actual_size; }
+
+  void set_requested_size(int sz) { _requested_size = sz; }
+  void set_actual_size(int sz)    { _actual_size    = sz; }
+};
+
 // An abstract task to be worked on by a flexible work gang,
 // and where the workers will periodically yield, usually
 // in response to some condition that is signalled by means
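Since FlexibleGangTask is new in this commit, a hypothetical subclass may help show the intended handshake: the task records the gang size it wants, the gang reports what it actually granted via set_actual_size(), and each granted member executes work(i):

    // Hypothetical subclass, not part of the commit.
    class CountingTask : public FlexibleGangTask {
    public:
      CountingTask() : FlexibleGangTask("counting task") {
        set_requested_size(4);   // ask the gang for four workers
      }
      virtual void work(int i) {
        // i names the gang member running this invocation;
        // actual_size(), filled in by the gang, bounds how many
        // members ever get here.
      }
    };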
@@ -70,19 +89,15 @@ protected: // Override from parent class
 // maximum) in response to task requests at certain points.
 // The last part (the flexible part) has not yet been fully
 // fleshed out and is a work in progress.
-class YieldingFlexibleGangTask: public AbstractGangTask {
+class YieldingFlexibleGangTask: public FlexibleGangTask {
   Status _status;
   YieldingFlexibleWorkGang* _gang;
-  int _actual_size;    // size of gang obtained
 
 protected:
-  int _requested_size; // size of gang requested
-
   // Constructor and desctructor: only construct subclasses.
-  YieldingFlexibleGangTask(const char* name): AbstractGangTask(name),
+  YieldingFlexibleGangTask(const char* name): FlexibleGangTask(name),
     _status(INACTIVE),
-    _gang(NULL),
-    _requested_size(0) { }
+    _gang(NULL) { }
 
   virtual ~YieldingFlexibleGangTask() { }
 
@@ -126,20 +141,13 @@ public:
   bool completed() const { return _status == COMPLETED; }
   bool aborted()   const { return _status == ABORTED; }
   bool active()    const { return _status == ACTIVE; }
-
-  int requested_size() const { return _requested_size; }
-  int actual_size()    const { return _actual_size; }
-
-  void set_requested_size(int sz) { _requested_size = sz; }
-  void set_actual_size(int sz)    { _actual_size    = sz; }
 };
-
 // Class YieldingWorkGang: A subclass of WorkGang.
 // In particular, a YieldingWorkGang is made up of
 // YieldingGangWorkers, and provides infrastructure
 // supporting yielding to the "GangOverseer",
 // being the thread that orchestrates the WorkGang via run_task().
-class YieldingFlexibleWorkGang: public AbstractWorkGang {
+class YieldingFlexibleWorkGang: public FlexibleWorkGang {
 // Here's the public interface to this class.
 public:
   // Constructor and destructor.
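For orientation, the inheritance chains implied by these two base-class changes appear to be the following (inferred from this diff; the intermediate WorkGang layer lives in workgroup.hpp and is our reading, not shown in this section):

    // AbstractWorkGang               -- monitor, task binding, worker counts
    //   WorkGang                     -- a fixed roster of GangWorkers
    //     FlexibleWorkGang           -- worker count owned by the gang,
    //                                   adjustable between tasks
    //       YieldingFlexibleWorkGang -- adds yield/abort of a running task
    //
    // AbstractGangTask
    //   FlexibleGangTask             -- requested vs. actual gang size
    //     YieldingFlexibleGangTask   -- adds the yield/abort status protocol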
@@ -151,6 +159,9 @@ public:
            "Incorrect cast");
     return (YieldingFlexibleGangTask*)task();
   }
+  // Allocate a worker and return a pointer to it.
+  GangWorker* allocate_worker(int which);
+
 // Run a task; returns when the task is done, or the workers yield,
 // or the task is aborted, or the work gang is terminated via stop().
 // A task that has been yielded can be continued via this same interface
@@ -180,10 +191,6 @@ public:
   void abort();
 
 private:
-  // The currently active workers in this gang.
-  // This is a number that is dynamically adjusted by
-  // the run_task() method at each subsequent invocation,
-  // using data in the YieldingFlexibleGangTask.
   int _active_workers;
   int _yielded_workers;
   void wait_for_gang();
@@ -194,6 +201,7 @@ public:
     return _active_workers;
   }
 
+  // Accessors for fields
   int yielded_workers() const {
     return _yielded_workers;
   }