commit 3c164a7525
Author: Jon Masamitsu
Date:   2011-12-27 12:38:49 -08:00

30 changed files with 559 additions and 421 deletions

View File

@@ -2598,7 +2598,7 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
-int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
+uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
 
 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
   _cfls(cfls)
@@ -2732,7 +2732,7 @@ void CFLS_LAB::retire(int tid) {
       // Update globals stats for num_blocks used
       _global_num_blocks[i] += (_num_blocks[i] - num_retire);
       _global_num_workers[i]++;
-      assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
+      assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
      if (num_retire > 0) {
        _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
        // Reset this list.
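Why the (ssize_t) cast could go: _global_num_workers[i] is now unsigned, and ParallelGCThreads is an unsigned (uintx) flag, so the operands of <= agree without help. A self-contained sketch of the mixed-sign comparison the cast used to silence (all names and values below are illustrative, not HotSpot's):

    // build with: g++ -Wall -Wsign-compare -c sign_compare.cpp
    typedef unsigned int  uint;
    typedef unsigned long uintx;         // ParallelGCThreads is a uintx flag
    static uintx ParallelGCThreads = 4;  // normally set via -XX:ParallelGCThreads

    bool fits_signed(int n_workers) {    // old shape: signed counter
      // cast needed, otherwise -Wsign-compare fires on int vs. unsigned long
      return n_workers <= (long)ParallelGCThreads;
    }
    bool fits_unsigned(uint n_workers) { // new shape: unsigned counter
      return n_workers <= ParallelGCThreads;  // types agree, no cast
    }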

View File

@@ -631,7 +631,7 @@ class CFLS_LAB : public CHeapObj {
   static AdaptiveWeightedAverage
                 _blocks_to_claim   [CompactibleFreeListSpace::IndexSetSize];
   static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
-  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
+  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
   size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
 
   // Internal work method
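As an aside, VECTOR_257 is a repeated-brace-initializer macro defined alongside this code; each static above gets one entry per slot of the 257-entry indexed free list. A reduced, hypothetical four-slot analogue of the idiom:

    // VECTOR_4 stands in for the real VECTOR_257, which repeats its
    // argument 257 times inside the braces.
    #define VECTOR_4(x) { x, x, x, x }

    typedef unsigned int uint;
    static uint _global_num_workers[4] = VECTOR_4(0);  // every slot starts at 0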

View File

@@ -3779,7 +3779,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
     terminator()->reset_for_reuse(active_workers);
   }
 
-  void work(int i);
+  void work(uint worker_id);
   bool should_yield() {
     return ConcurrentMarkSweepThread::should_yield()
         && !_collector->foregroundGCIsActive()
@@ -3852,7 +3852,7 @@ void CMSConcMarkingTerminator::yield() {
 // . if neither is available, offer termination
 //   -- Terminate and return result
 //
-void CMSConcMarkingTask::work(int i) {
+void CMSConcMarkingTask::work(uint worker_id) {
   elapsedTimer _timer;
   ResourceMark rm;
   HandleMark hm;
@@ -3860,37 +3860,40 @@ void CMSConcMarkingTask::work(int i) {
   DEBUG_ONLY(_collector->verify_overflow_empty();)
 
   // Before we begin work, our work queue should be empty
-  assert(work_queue(i)->size() == 0, "Expected to be empty");
+  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
   // Scan the bitmap covering _cms_space, tracing through grey objects.
   _timer.start();
-  do_scan_and_mark(i, _cms_space);
+  do_scan_and_mark(worker_id, _cms_space);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
 
   // ... do the same for the _perm_space
   _timer.reset();
   _timer.start();
-  do_scan_and_mark(i, _perm_space);
+  do_scan_and_mark(worker_id, _perm_space);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
 
   // ... do work stealing
   _timer.reset();
   _timer.start();
-  do_work_steal(i);
+  do_work_steal(worker_id);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
-  assert(work_queue(i)->size() == 0, "Should have been emptied");
+  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
   // Note that under the current task protocol, the
   // following assertion is true even of the spaces
   // expanded since the completion of the concurrent
@@ -3946,7 +3949,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
   // We allow that there may be no tasks to do here because
   // we are restarting after a stack overflow.
   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
-  int nth_task = 0;
+  uint nth_task = 0;
 
   HeapWord* aligned_start = sp->bottom();
   if (sp->used_region().contains(_restart_addr)) {
@@ -5075,7 +5078,7 @@ class CMSParRemarkTask: public AbstractGangTask {
   ParallelTaskTerminator* terminator() { return &_term; }
   int n_workers() { return _n_workers; }
 
-  void work(int i);
+  void work(uint worker_id);
 
  private:
   // Work method in support of parallel rescan ... of young gen spaces
@@ -5096,7 +5099,7 @@ class CMSParRemarkTask: public AbstractGangTask {
 // also is passed to do_dirty_card_rescan_tasks() and to
 // do_work_steal() to select the i-th task_queue.
 
-void CMSParRemarkTask::work(int i) {
+void CMSParRemarkTask::work(uint worker_id) {
   elapsedTimer _timer;
   ResourceMark rm;
   HandleMark hm;
@@ -5107,7 +5110,7 @@ void CMSParRemarkTask::work(int i) {
   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
-    work_queue(i), &(_collector->_revisitStack));
+    work_queue(worker_id), &(_collector->_revisitStack));
 
   // Rescan young gen roots first since these are likely
   // coarsely partitioned and may, on that account, constitute
@@ -5128,15 +5131,15 @@ void CMSParRemarkTask::work(int i) {
     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
 
-    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
-    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
-    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
+    do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
+    do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
+    do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
 
     _timer.stop();
     if (PrintCMSStatistics != 0) {
       gclog_or_tty->print_cr(
         "Finished young gen rescan work in %dth thread: %3.3f sec",
-        i, _timer.seconds());
+        worker_id, _timer.seconds());
     }
   }
@@ -5158,7 +5161,7 @@ void CMSParRemarkTask::work(int i) {
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished remaining root rescan work in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 
   // ---------- rescan dirty cards ------------
@@ -5167,26 +5170,26 @@ void CMSParRemarkTask::work(int i) {
 
   // Do the rescan tasks for each of the two spaces
   // (cms_space and perm_space) in turn.
-  // "i" is passed to select the "i-th" task_queue
-  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
-  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
+  // "worker_id" is passed to select the task_queue for "worker_id"
+  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
+  do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished dirty card rescan work in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 
   // ---------- steal work from other threads ...
   // ---------- ... and drain overflow list.
   _timer.reset();
   _timer.start();
-  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
+  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished work stealing in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 }
@@ -5207,8 +5210,8 @@ CMSParRemarkTask::do_young_space_rescan(int i,
   SequentialSubTasksDone* pst = space->par_seq_tasks();
   assert(pst->valid(), "Uninitialized use?");
 
-  int nth_task = 0;
-  int n_tasks  = pst->n_tasks();
+  uint nth_task = 0;
+  uint n_tasks  = pst->n_tasks();
 
   HeapWord *start, *end;
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
@@ -5220,12 +5223,12 @@ CMSParRemarkTask::do_young_space_rescan(int i,
     } else if (nth_task == 0) {
       start = space->bottom();
       end = chunk_array[nth_task];
-    } else if (nth_task < (jint)chunk_top) {
+    } else if (nth_task < (uint)chunk_top) {
       assert(nth_task >= 1, "Control point invariant");
       start = chunk_array[nth_task - 1];
       end = chunk_array[nth_task];
     } else {
-      assert(nth_task == (jint)chunk_top, "Control point invariant");
+      assert(nth_task == (uint)chunk_top, "Control point invariant");
       start = chunk_array[chunk_top - 1];
       end = space->top();
     }
@@ -5288,7 +5291,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
   assert(pst->valid(), "Uninitialized use?");
 
-  int nth_task = 0;
+  uint nth_task = 0;
   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
   MemRegion span = sp->used_region();
   HeapWord* start_addr = span.start();
@@ -5736,26 +5739,26 @@ public:
                     CMSParKeepAliveClosure* keep_alive,
                     int* seed);
 
-  virtual void work(int i);
+  virtual void work(uint worker_id);
 };
 
-void CMSRefProcTaskProxy::work(int i) {
+void CMSRefProcTaskProxy::work(uint worker_id) {
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                         _mark_bit_map,
                                         &_collector->_revisitStack,
-                                        work_queue(i));
+                                        work_queue(worker_id));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
                                                  _mark_bit_map,
                                                  &_collector->_revisitStack,
-                                                 work_queue(i));
+                                                 work_queue(worker_id));
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
-  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
+  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
-    do_work_steal(i, &par_drain_stack, &par_keep_alive,
-                  _collector->hash_seed(i));
+    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
+                  _collector->hash_seed(worker_id));
   }
-  assert(work_queue(i)->size() == 0, "work_queue should be empty");
+  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
 }
@@ -5769,9 +5772,9 @@ public:
     _task(task)
   { }
 
-  virtual void work(int i)
+  virtual void work(uint worker_id)
   {
-    _task.work(i);
+    _task.work(worker_id);
   }
 };
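Every hunk in this file follows one recipe: each gang task's work() override switches from int to uint and the parameter is renamed from i to worker_id, matching the updated base-class virtual. A simplified stand-in for that interface (the real declarations live in the work-gang code; this is a sketch, not the shipped header):

    typedef unsigned int uint;

    // Simplified stand-in for HotSpot's gang-task base class.
    class AbstractGangTask {
     public:
      // Was: virtual void work(int i) = 0;
      virtual void work(uint worker_id) = 0;  // ids index per-worker arrays,
      virtual ~AbstractGangTask() {}          // so they are never negative
    };

Since this is pre-C++11 code with no override keyword, a subclass that kept the old work(int) signature would silently stop overriding, which is why every subclass above (CMSConcMarkingTask, CMSParRemarkTask, CMSRefProcTaskProxy, ...) changes in lockstep.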

View File

@@ -264,7 +264,7 @@ prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
     // or some improperly initialized variable with leads to no
     // active threads, protect against that in a product build.
     n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
-                     1);
+                     1U);
   }
   size_t max_waste = n_threads * chunkSize;
   // it should be aligned with respect to chunkSize

View File

@@ -458,8 +458,8 @@ bool ConcurrentMark::not_yet_marked(oop obj) const {
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
-size_t ConcurrentMark::scale_parallel_threads(size_t n_par_threads) {
-  return MAX2((n_par_threads + 2) / 4, (size_t)1);
+uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
+  return MAX2((n_par_threads + 2) / 4, 1U);
 }
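The literal changes from (size_t)1 to 1U because MAX2 deduces a single template type from both arguments; now that n_par_threads is uint, the literal must be uint too. A minimal self-contained sketch (MAX2 here is a stand-in shaped like the one in globalDefinitions.hpp):

    #include <cstdio>
    typedef unsigned int uint;

    template <class T> static T MAX2(T a, T b) { return (a > b) ? a : b; }

    int main() {
      uint n_par_threads = 6;
      // T deduces to uint. Mixing uint and size_t arguments, as the old
      // (size_t)1 literal would now do, fails to deduce a single T on LP64.
      uint scaled = MAX2((n_par_threads + 2) / 4, 1U);
      std::printf("%u\n", scaled);  // prints 2
      return 0;
    }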
 ConcurrentMark::ConcurrentMark(ReservedSpace rs,
@@ -486,7 +486,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _regionStack(),
   // _finger set in set_non_marking_state
 
-  _max_task_num(MAX2(ParallelGCThreads, (size_t)1)),
+  _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_task_num)),
@@ -506,7 +506,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
   _parallel_workers(NULL) {
   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   if (verbose_level < no_verbose) {
@@ -568,7 +567,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
     // if both are set
 
-    _parallel_marking_threads = ConcGCThreads;
+    _parallel_marking_threads = (uint) ConcGCThreads;
     _max_parallel_marking_threads = _parallel_marking_threads;
     _sleep_factor = 0.0;
     _marking_task_overhead = 1.0;
@@ -589,12 +588,12 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     double sleep_factor =
                        (1.0 - marking_task_overhead) / marking_task_overhead;
 
-    _parallel_marking_threads = (size_t) marking_thread_num;
+    _parallel_marking_threads = (uint) marking_thread_num;
     _max_parallel_marking_threads = _parallel_marking_threads;
     _sleep_factor = sleep_factor;
     _marking_task_overhead = marking_task_overhead;
   } else {
-    _parallel_marking_threads = scale_parallel_threads(ParallelGCThreads);
+    _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
     _max_parallel_marking_threads = _parallel_marking_threads;
     _sleep_factor = 0.0;
     _marking_task_overhead = 1.0;
@@ -618,7 +617,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     guarantee(parallel_marking_threads() > 0, "peace of mind");
     _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
-         (int) _max_parallel_marking_threads, false, true);
+         _max_parallel_marking_threads, false, true);
     if (_parallel_workers == NULL) {
       vm_exit_during_initialization("Failed necessary allocation.");
     } else {
@@ -691,7 +690,7 @@ void ConcurrentMark::reset() {
   set_concurrent_marking_in_progress();
 }
 
-void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
+void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
   assert(active_tasks <= _max_task_num, "we should not have more");
 
   _active_tasks = active_tasks;
@@ -1048,7 +1047,7 @@ private:
   ConcurrentMarkThread* _cmt;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
     ResourceMark rm;
@@ -1057,8 +1056,8 @@ public:
     ConcurrentGCThread::stsJoin();
 
-    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
-    CMTask* the_task = _cm->task(worker_i);
+    assert(worker_id < _cm->active_tasks(), "invariant");
+    CMTask* the_task = _cm->task(worker_id);
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
@@ -1076,7 +1075,7 @@ public:
         double elapsed_time_sec = end_time_sec - start_time_sec;
         _cm->clear_has_overflown();
 
-        bool ret = _cm->do_yield_check(worker_i);
+        bool ret = _cm->do_yield_check(worker_id);
 
         jlong sleep_time_ms;
         if (!_cm->has_aborted() && the_task->has_aborted()) {
@@ -1105,7 +1104,7 @@ public:
     ConcurrentGCThread::stsLeave();
 
     double end_vtime = os::elapsedVTime();
-    _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
+    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   }
 
   CMConcurrentMarkingTask(ConcurrentMark* cm,
@@ -1117,9 +1116,9 @@ public:
 
 // Calculates the number of active workers for a concurrent
 // phase.
-size_t ConcurrentMark::calc_parallel_marking_threads() {
+uint ConcurrentMark::calc_parallel_marking_threads() {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    size_t n_conc_workers = 0;
+    uint n_conc_workers = 0;
     if (!UseDynamicNumberOfGCThreads ||
         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
          !ForceDynamicNumberOfGCThreads)) {
@@ -1159,7 +1158,7 @@ void ConcurrentMark::markFromRoots() {
   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
     "Maximum number of marking threads exceeded");
 
-  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  uint active_workers = MAX2(1U, parallel_marking_threads());
 
   // Parallel task terminator is set in "set_phase()"
   set_phase(active_workers, true /* concurrent */);
@@ -1503,7 +1502,7 @@ class G1ParFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
   CMBitMap* _bm;
-  size_t _n_workers;
+  uint _n_workers;
   size_t *_live_bytes;
   size_t *_used_bytes;
   BitMap* _region_bm;
@@ -1535,13 +1534,13 @@ public:
     FREE_C_HEAP_ARRAY(size_t, _used_bytes);
   }
 
-  void work(int i) {
+  void work(uint worker_id) {
     CalcLiveObjectsClosure calccl(true /*final*/,
                                   _bm, _g1h->concurrent_mark(),
                                   _region_bm, _card_bm);
     calccl.no_yield();
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, i,
+      _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
                                             (int) _n_workers,
                                             HeapRegion::FinalCountClaimValue);
     } else {
@@ -1549,19 +1548,19 @@ public:
     }
     assert(calccl.complete(), "Shouldn't have yielded!");
 
-    assert((size_t) i < _n_workers, "invariant");
-    _live_bytes[i] = calccl.tot_live();
-    _used_bytes[i] = calccl.tot_used();
+    assert(worker_id < _n_workers, "invariant");
+    _live_bytes[worker_id] = calccl.tot_live();
+    _used_bytes[worker_id] = calccl.tot_used();
   }
   size_t live_bytes()  {
     size_t live_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       live_bytes += _live_bytes[i];
     return live_bytes;
   }
   size_t used_bytes()  {
     size_t used_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       used_bytes += _used_bytes[i];
     return used_bytes;
   }
@@ -1646,18 +1645,18 @@ public:
     AbstractGangTask("G1 note end"), _g1h(g1h),
     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
                                            &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
+      _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
                                             _g1h->workers()->active_workers(),
                                             HeapRegion::NoteEndClaimValue);
     } else {
@@ -1701,8 +1700,8 @@ public:
     double end = os::elapsedTime();
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-                          "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
-                          i, start, end, (end-start)*1000.0,
+                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
+                          worker_id, start, end, (end-start)*1000.0,
                           g1_note_end.regions_claimed(),
                           g1_note_end.claimed_region_time_sec()*1000.0,
                           g1_note_end.max_region_time_sec()*1000.0);
@@ -1724,9 +1723,9 @@ public:
     _region_bm(region_bm), _card_bm(card_bm)
   {}
 
-  void work(int i) {
+  void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1rs->scrub_par(_region_bm, _card_bm, i,
+      _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
                        HeapRegion::ScrubRemSetClaimValue);
     } else {
       _g1rs->scrub(_region_bm, _card_bm);
@@ -1766,7 +1765,7 @@ void ConcurrentMark::cleanup() {
 
   HeapRegionRemSet::reset_for_cleanup_tasks();
 
-  size_t n_workers;
+  uint n_workers;
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@@ -1778,7 +1777,7 @@ void ConcurrentMark::cleanup() {
 
     g1h->set_par_threads();
     n_workers = g1h->n_par_threads();
-    assert(g1h->n_par_threads() == (int) n_workers,
+    assert(g1h->n_par_threads() == n_workers,
            "Should not have been reset");
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
@@ -2169,13 +2168,13 @@ public:
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
 
-  virtual void work(int i) {
-    CMTask* marking_task = _cm->task(i);
+  virtual void work(uint worker_id) {
+    CMTask* marking_task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
 
-    _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
+    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
   }
 };
@@ -2201,8 +2200,8 @@ public:
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task) { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
@@ -2249,8 +2248,8 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
     // We use the work gang from the G1CollectedHeap and we utilize all
     // the worker threads.
-    int active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1;
-    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+    uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
+    active_workers = MAX2(MIN2(active_workers, _max_task_num), 1U);
 
     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                               g1h->workers(), active_workers);
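The replacement line reads as a clamp: MIN2 caps the requested gang size at _max_task_num, then MAX2 guarantees at least one worker. A small sketch with illustrative values:

    typedef unsigned int uint;
    template <class T> static T MIN2(T a, T b) { return (a < b) ? a : b; }
    template <class T> static T MAX2(T a, T b) { return (a > b) ? a : b; }

    // With max_task_num == 8: 0 -> 1, 5 -> 5, 32 -> 8.
    static uint clamp_active_workers(uint reported, uint max_task_num) {
      return MAX2(MIN2(reported, max_task_num), 1U);
    }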
@@ -2314,11 +2313,11 @@ private:
   ConcurrentMark *_cm;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     // Since all available tasks are actually started, we should
     // only proceed if we're supposed to be actived.
-    if ((size_t)worker_i < _cm->active_tasks()) {
-      CMTask* task = _cm->task(worker_i);
+    if (worker_id < _cm->active_tasks()) {
+      CMTask* task = _cm->task(worker_id);
       task->record_start_time();
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
@@ -2347,10 +2346,10 @@ void ConcurrentMark::checkpointRootsFinalWork() {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all active threads
-    int active_workers = g1h->workers()->active_workers();
+    uint active_workers = g1h->workers()->active_workers();
     if (active_workers == 0) {
       assert(active_workers > 0, "Should have been set earlier");
-      active_workers = ParallelGCThreads;
+      active_workers = (uint) ParallelGCThreads;
       g1h->workers()->set_active_workers(active_workers);
     }
     set_phase(active_workers, false /* concurrent */);
@@ -2366,7 +2365,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
-    int active_workers = 1;
+    uint active_workers = 1;
     set_phase(active_workers, false /* concurrent */);
 
     CMRemarkTask remarkTask(this, active_workers);
@@ -2921,7 +2920,7 @@ class CSetMarkOopClosure: public OopClosure {
   int              _ms_size;
   int              _ms_ind;
   int              _array_increment;
-  int              _worker_i;
+  uint             _worker_id;
 
   bool push(oop obj, int arr_ind = 0) {
     if (_ms_ind == _ms_size) {
@@ -2971,7 +2970,7 @@ class CSetMarkOopClosure: public OopClosure {
   }
 
 public:
-  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _cm(cm),
     _bm(cm->nextMarkBitMap()),
@@ -2979,7 +2978,7 @@ public:
     _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
     _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
     _array_increment(MAX2(ms_size/8, 16)),
-    _worker_i(worker_i) { }
+    _worker_id(worker_id) { }
 
   ~CSetMarkOopClosure() {
     FREE_C_HEAP_ARRAY(oop, _ms);
@@ -3024,14 +3023,14 @@ class CSetMarkBitMapClosure: public BitMapClosure {
   CMBitMap*          _bitMap;
   ConcurrentMark*    _cm;
   CSetMarkOopClosure _oop_cl;
-  int                _worker_i;
+  uint               _worker_id;
 
 public:
-  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _bitMap(cm->nextMarkBitMap()),
-    _oop_cl(cm, ms_size, worker_i),
-    _worker_i(worker_i) { }
+    _oop_cl(cm, ms_size, worker_id),
+    _worker_id(worker_id) { }
 
   bool do_bit(size_t offset) {
     // convert offset into a HeapWord*
@@ -3056,17 +3055,17 @@ public:
 class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
   CMBitMap*             _bm;
   CSetMarkBitMapClosure _bit_cl;
-  int                   _worker_i;
+  uint                  _worker_id;
 
   enum SomePrivateConstants {
     MSSize = 1000
   };
 
 public:
-  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_i) :
+  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
     _bm(cm->nextMarkBitMap()),
-    _bit_cl(cm, MSSize, worker_i),
-    _worker_i(worker_i) { }
+    _bit_cl(cm, MSSize, worker_id),
+    _worker_id(worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
@@ -3109,9 +3108,9 @@ public:
     AbstractGangTask("Complete Mark in CSet"),
     _g1h(g1h), _cm(cm) { }
 
-  void work(int worker_i) {
-    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
-    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
+  void work(uint worker_id) {
+    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
+    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &cmplt);
   }
 };
@@ -3307,13 +3306,13 @@ void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
 // the CMS bit map. Called at the first checkpoint.
 // We take a break if someone is trying to stop the world.
 
-bool ConcurrentMark::do_yield_check(int worker_i) {
+bool ConcurrentMark::do_yield_check(uint worker_id) {
   if (should_yield()) {
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
     }
     cmThread()->yield();
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause_end();
     }
     return true;

View File

@@ -374,9 +374,9 @@ class ConcurrentMark: public CHeapObj {
 protected:
   ConcurrentMarkThread* _cmThread;   // the thread doing the work
   G1CollectedHeap*      _g1h;       // the heap.
-  size_t                _parallel_marking_threads; // the number of marking
+  uint                  _parallel_marking_threads; // the number of marking
                                                    // threads we're use
-  size_t                _max_parallel_marking_threads; // max number of marking
+  uint                  _max_parallel_marking_threads; // max number of marking
                                                        // threads we'll ever use
   double                _sleep_factor; // how much we have to sleep, with
                                        // respect to the work we just did, to
@@ -412,8 +412,8 @@ protected:
                                     // last claimed region
 
   // marking tasks
-  size_t                  _max_task_num; // maximum task number
-  size_t                  _active_tasks; // task num currently active
+  uint                    _max_task_num; // maximum task number
+  uint                    _active_tasks; // task num currently active
   CMTask**                _tasks;        // task queue array (max_task_num len)
   CMTaskQueueSet*         _task_queues;  // task queue set
   ParallelTaskTerminator  _terminator;   // for termination
@@ -492,7 +492,7 @@ protected:
   // It should be called to indicate which phase we're in (concurrent
   // mark or remark) and how many threads are currently active.
-  void set_phase(size_t active_tasks, bool concurrent);
+  void set_phase(uint active_tasks, bool concurrent);
 
   // We do this after we're done with marking so that the marking data
   // structures are initialised to a sensible and predictable state.
   void set_non_marking_state();
@@ -505,8 +505,8 @@ protected:
   }
 
   // accessor methods
-  size_t parallel_marking_threads() { return _parallel_marking_threads; }
-  size_t max_parallel_marking_threads() { return _max_parallel_marking_threads;}
+  uint parallel_marking_threads() { return _parallel_marking_threads; }
+  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
   double sleep_factor()             { return _sleep_factor; }
   double marking_task_overhead()    { return _marking_task_overhead;}
   double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
@@ -514,7 +514,7 @@ protected:
   HeapWord*               finger()        { return _finger; }
   bool                    concurrent()    { return _concurrent; }
-  size_t                  active_tasks()  { return _active_tasks; }
+  uint                    active_tasks()  { return _active_tasks; }
   ParallelTaskTerminator* terminator()    { return &_terminator; }
 
   // It claims the next available region to be scanned by a marking
@@ -715,10 +715,10 @@ public:
   // Returns the number of GC threads to be used in a concurrent
   // phase based on the number of GC threads being used in a STW
   // phase.
-  size_t scale_parallel_threads(size_t n_par_threads);
+  uint scale_parallel_threads(uint n_par_threads);
 
   // Calculates the number of GC threads to be used in a concurrent phase.
-  size_t calc_parallel_marking_threads();
+  uint calc_parallel_marking_threads();
 
   // The following three are interaction between CM and
   // G1CollectedHeap
@@ -873,7 +873,7 @@ public:
     return _prevMarkBitMap->isMarked(addr);
   }
 
-  inline bool do_yield_check(int worker_i = 0);
+  inline bool do_yield_check(uint worker_i = 0);
   inline bool should_yield();
 
   // Called to abort the marking cycle after a Full GC takes palce.

View File

@@ -1165,9 +1165,9 @@ public:
     _g1(g1)
   { }
 
-  void work(int i) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
-    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+  void work(uint worker_id) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::RebuildRSClaimValue);
   }
@@ -1374,7 +1374,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
 
       // Rebuild remembered sets of all regions.
       if (G1CollectedHeap::use_parallel_gc_threads()) {
-        int n_workers =
+        uint n_workers =
           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                          workers()->active_workers(),
                                          Threads::number_of_non_daemon_threads());
@@ -2519,11 +2519,11 @@ void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 int worker,
-                                                 int no_of_par_workers,
+                                                 uint worker,
+                                                 uint no_of_par_workers,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
   assert(UseDynamicNumberOfGCThreads ||
@@ -2739,7 +2739,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
   result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     size_t cs_size = g1_policy()->cset_region_length();
-    int active_workers = workers()->active_workers();
+    uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
            active_workers == workers()->total_workers(),
            "Unless dynamic should use total workers");
@@ -3075,10 +3075,10 @@ public:
     return _failures;
   }
 
-  void work(int worker_i) {
+  void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true, _vo);
-    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
+    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
                                           _g1h->workers()->active_workers(),
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
@@ -4725,7 +4725,7 @@ protected:
   G1CollectedHeap*       _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@@ -4765,18 +4765,18 @@ public:
     _n_workers = active_workers;
   }
 
-  void work(int i) {
-    if (i >= _n_workers) return;  // no work needed this round
+  void work(uint worker_id) {
+    if (worker_id >= _n_workers) return;  // no work needed this round
 
     double start_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+    _g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);
 
     ResourceMark rm;
     HandleMark   hm;
 
     ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -4808,7 +4808,7 @@ public:
                                   scan_root_cl,
                                   &push_heap_rs_cl,
                                   scan_perm_cl,
-                                  i);
+                                  worker_id);
     pss.end_strong_roots();
 
     {
@@ -4817,8 +4817,8 @@ public:
       evac.do_void();
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
+      _g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
+      _g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4828,12 +4828,12 @@ public:
 
     if (ParallelGCVerbose) {
       MutexLocker x(stats_lock());
-      pss.print_termination_stats(i);
+      pss.print_termination_stats(worker_id);
     }
 
     assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
+    _g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
   }
 };
@@ -5091,14 +5091,14 @@ public:
     _terminator(terminator)
   {}
 
-  virtual void work(int i) {
+  virtual void work(uint worker_id) {
     // The reference processing task executed by a single worker.
     ResourceMark rm;
     HandleMark   hm;
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5130,7 +5130,7 @@ public:
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
 
     // Call the reference processing task's work routine.
-    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
 
     // Note we cannot assert that the refs array is empty here as not all
     // of the processing tasks (specifically phase2 - pp2_work) execute
@@ -5165,8 +5165,8 @@ public:
     _enq_task(enq_task)
   { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
@@ -5195,7 +5195,7 @@ protected:
   G1CollectedHeap* _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
 public:
   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
@@ -5206,11 +5206,11 @@ public:
     _n_workers(workers)
   { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5246,17 +5246,17 @@ public:
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
-    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
-    int stride = MIN2(MAX2(_n_workers, 1), limit);
+    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
 
     // limit is set using max_num_q() - which was set using ParallelGCThreads.
     // So this must be true - but assert just in case someone decides to
     // change the worker ids.
-    assert(0 <= i && i < limit, "sanity");
+    assert(0 <= worker_id && worker_id < limit, "sanity");
     assert(!rp->discovery_is_atomic(), "check this code");
 
     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
-    for (int idx = i; idx < limit; idx += stride) {
+    for (uint idx = worker_id; idx < limit; idx += stride) {
       DiscoveredList& ref_list = rp->discovered_refs()[idx];
 
       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
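The loop above distributes the discovered-reference lists round-robin: worker k takes lists k, k+stride, k+2*stride, and so on, so the whole [0, limit) range is covered exactly once across workers. A toy, self-contained illustration of the partitioning (4 workers and 10 lists are made-up values):

    #include <cstdio>
    typedef unsigned int uint;

    int main() {
      const uint limit  = 10;  // number of discovered ref lists (toy value)
      const uint stride = 4;   // number of workers (toy value)
      for (uint worker_id = 0; worker_id < stride; worker_id++) {
        std::printf("worker %u:", worker_id);
        for (uint idx = worker_id; idx < limit; idx += stride) {
          std::printf(" %u", idx);  // same loop shape as the hunk above
        }
        std::printf("\n");
      }
      return 0;  // worker 0: 0 4 8; worker 1: 1 5 9; worker 2: 2 6; worker 3: 3 7
    }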
@@ -5310,7 +5310,7 @@ void G1CollectedHeap::process_discovered_references() {
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                         workers()->active_workers() : 1);
 
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
@@ -5416,7 +5416,7 @@ void G1CollectedHeap::enqueue_discovered_references() {
   } else {
     // Parallel reference enqueuing
 
-    int active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
+    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
     assert(active_workers == workers()->active_workers(),
            "Need to reset active_workers");
     assert(rp->num_q() == active_workers, "sanity");
@@ -5445,7 +5445,7 @@ void G1CollectedHeap::evacuate_collection_set() {
   concurrent_g1_refine()->set_use_cache(false);
   concurrent_g1_refine()->clear_hot_cache_claimed_index();
 
-  int n_workers;
+  uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
@@ -5658,7 +5658,7 @@ public:
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
@@ -6141,7 +6141,7 @@ void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
-  int n_workers = workers()->active_workers();
+  uint n_workers = workers()->active_workers();
   assert(UseDynamicNumberOfGCThreads ||
          n_workers == workers()->total_workers(),
          "Otherwise should be using the total number of workers");

View File

@@ -995,7 +995,7 @@ public:
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
-  void set_par_threads(int t) {
+  void set_par_threads(uint t) {
     SharedHeap::set_par_threads(t);
     // Done in SharedHeap but oddly there are
     // two _process_strong_tasks's in a G1CollectedHeap
@@ -1298,8 +1298,8 @@ public:
   // chunk.)  For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       int worker,
-                                       int no_of_par_workers,
+                                       uint worker,
+                                       uint no_of_par_workers,
                                        jint claim_value);
 
   // It resets all the region claim values to the default.

View File

@ -136,7 +136,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_stop_world_start(0.0), _stop_world_start(0.0),
_all_stop_world_times_ms(new NumberSeq()), _all_stop_world_times_ms(new NumberSeq()),
_all_yield_times_ms(new NumberSeq()), _all_yield_times_ms(new NumberSeq()),
_using_new_ratio_calculations(false),
_summary(new Summary()), _summary(new Summary()),
@ -230,7 +229,9 @@ G1CollectorPolicy::G1CollectorPolicy() :
_inc_cset_bytes_used_before(0), _inc_cset_bytes_used_before(0),
_inc_cset_max_finger(NULL), _inc_cset_max_finger(NULL),
_inc_cset_recorded_rs_lengths(0), _inc_cset_recorded_rs_lengths(0),
_inc_cset_recorded_rs_lengths_diffs(0),
_inc_cset_predicted_elapsed_time_ms(0.0), _inc_cset_predicted_elapsed_time_ms(0.0),
_inc_cset_predicted_elapsed_time_ms_diffs(0.0),
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
@ -407,11 +408,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
initialize_all(); initialize_all();
_collectionSetChooser = new CollectionSetChooser(); _collectionSetChooser = new CollectionSetChooser();
} _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
i++; if (i == len) i = 0;
} }
void G1CollectorPolicy::initialize_flags() { void G1CollectorPolicy::initialize_flags() {
@@ -423,39 +420,74 @@ void G1CollectorPolicy::initialize_flags() {
CollectorPolicy::initialize_flags(); CollectorPolicy::initialize_flags();
} }
// The easiest way to deal with the parsing of the NewSize / G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
// MaxNewSize / etc. parameters is to re-use the code in the assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
// TwoGenerationCollectorPolicy class. This is similar to what assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
// ParallelScavenge does with its GenerationSizer class (see assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start. if (FLAG_IS_CMDLINE(NewRatio)) {
class G1YoungGenSizer : public TwoGenerationCollectorPolicy { if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
private: warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
size_t size_to_region_num(size_t byte_size) { } else {
return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes); _sizer_kind = SizerNewRatio;
_adaptive_size = false;
return;
}
} }
public: if (FLAG_IS_CMDLINE(NewSize)) {
G1YoungGenSizer() { _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
initialize_flags(); if (FLAG_IS_CMDLINE(MaxNewSize)) {
initialize_size_info(); _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
_sizer_kind = SizerMaxAndNewSize;
_adaptive_size = _min_desired_young_length == _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
} }
size_t min_young_region_num() { } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
return size_to_region_num(_min_gen0_size); _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
_sizer_kind = SizerMaxNewSizeOnly;
} }
size_t initial_young_region_num() {
return size_to_region_num(_initial_gen0_size);
} }
size_t max_young_region_num() {
return size_to_region_num(_max_gen0_size);
}
};
void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) { size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
assert(number_of_heap_regions > 0, "Heap must be initialized"); size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
size_t young_size = number_of_heap_regions / (NewRatio + 1); return MAX2((size_t)1, default_value);
_min_desired_young_length = young_size; }
_max_desired_young_length = young_size;
size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
return MAX2((size_t)1, default_value);
}
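For concreteness, a hedged standalone sketch of the default-percent arithmetic in calculate_default_{min,max}_length() above; the heap sizes are illustrative assumptions, not values from this commit.

#include <cassert>
#include <cstddef>

static size_t percent_of(size_t regions, size_t percent) {
  size_t value = (regions * percent) / 100;
  return value > 1 ? value : 1;  // same floor as MAX2((size_t)1, ...)
}

int main() {
  assert(percent_of(100, 20) == 20);  // default min (G1DefaultMinNewGenPercent)
  assert(percent_of(100, 50) == 50);  // default max (G1DefaultMaxNewGenPercent)
  assert(percent_of(3, 20) == 1);     // tiny heap: floored at one region
  return 0;
}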
void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
assert(new_number_of_heap_regions > 0, "Heap must be initialized");
switch (_sizer_kind) {
case SizerDefaults:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
break;
case SizerNewSizeOnly:
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
_max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
break;
case SizerMaxNewSizeOnly:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
break;
case SizerMaxAndNewSize:
// Do nothing. Values set on the command line, don't update them at runtime.
break;
case SizerNewRatio:
_min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
_max_desired_young_length = _min_desired_young_length;
break;
default:
ShouldNotReachHere();
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
} }
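A similarly hedged sketch of the SizerNewRatio case above: both bounds collapse to regions / (NewRatio + 1), so the young length is "fixed" yet still recomputed on every heap resize (the flag value and region counts below are assumed for illustration).

#include <cassert>
#include <cstddef>

int main() {
  const size_t NewRatio = 3;     // assumed -XX:NewRatio=3
  size_t regions = 1000;         // assumed heap size in regions
  size_t young = regions / (NewRatio + 1);
  assert(young == 250);          // min == max: a "fixed" young length
  regions = 2000;                // after heap_size_changed() ...
  young = regions / (NewRatio + 1);
  assert(young == 500);          // ... the fixed length tracks the new size
  return 0;
}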
void G1CollectorPolicy::init() { void G1CollectorPolicy::init() {
@@ -466,28 +498,10 @@ void G1CollectorPolicy::init() {
initialize_gc_policy_counters(); initialize_gc_policy_counters();
G1YoungGenSizer sizer;
_min_desired_young_length = sizer.min_young_region_num();
_max_desired_young_length = sizer.max_young_region_num();
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
// Treat NewRatio as a fixed size that is only recalculated when the heap size changes
update_young_list_size_using_newratio(_g1->n_regions());
_using_new_ratio_calculations = true;
}
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
if (adaptive_young_list_length()) { if (adaptive_young_list_length()) {
_young_list_fixed_length = 0; _young_list_fixed_length = 0;
} else { } else {
assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ"); _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
_young_list_fixed_length = _min_desired_young_length;
} }
_free_regions_at_end_of_collection = _g1->free_regions(); _free_regions_at_end_of_collection = _g1->free_regions();
update_young_list_target_length(); update_young_list_target_length();
@@ -541,11 +555,7 @@ void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
// smaller than 1.0) we'll get 1. // smaller than 1.0) we'll get 1.
_reserve_regions = (size_t) ceil(reserve_regions_d); _reserve_regions = (size_t) ceil(reserve_regions_d);
if (_using_new_ratio_calculations) { _young_gen_sizer->heap_size_changed(new_number_of_regions);
// -XX:NewRatio was specified so we need to update the
// young gen length when the heap size has changed.
update_young_list_size_using_newratio(new_number_of_regions);
}
} }
size_t G1CollectorPolicy::calculate_young_list_desired_min_length( size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
@@ -563,14 +573,14 @@ size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
} }
desired_min_length += base_min_length; desired_min_length += base_min_length;
// make sure we don't go below any user-defined minimum bound // make sure we don't go below any user-defined minimum bound
return MAX2(_min_desired_young_length, desired_min_length); return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
} }
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() { size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
// Here, we might want to also take into account any additional // Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we // constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound. // effectively don't set this bound.
return _max_desired_young_length; return _young_gen_sizer->max_desired_young_length();
} }
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
@@ -1551,10 +1561,19 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
} }
} }
// It turns out that, sometimes, _max_rs_lengths can get smaller // This is defensive. For a while _max_rs_lengths could get
// than _recorded_rs_lengths which causes rs_length_diff to get // smaller than _recorded_rs_lengths which was causing
// very large and mess up the RSet length predictions. We'll be // rs_length_diff to get very large and mess up the RSet length
// defensive until we work out why this happens. // predictions. The reason was unsafe concurrent updates to the
// _inc_cset_recorded_rs_lengths field which the code below guards
// against (see CR 7118202). This bug has now been fixed (see CR
// 7119027). However, I'm still worried that
// _inc_cset_recorded_rs_lengths might still end up somewhat
// inaccurate. The concurrent refinement thread calculates an
// RSet's length concurrently with other CR threads updating it
// which might cause it to calculate the length incorrectly (if,
// say, it's in mid-coarsening). So I'll leave in the defensive
// conditional below just in case.
size_t rs_length_diff = 0; size_t rs_length_diff = 0;
if (_max_rs_lengths > _recorded_rs_lengths) { if (_max_rs_lengths > _recorded_rs_lengths) {
rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
@@ -2321,17 +2340,19 @@ public:
_g1(G1CollectedHeap::heap()) _g1(G1CollectedHeap::heap())
{} {}
void work(int i) { void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i); ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
_chunk_size,
worker_id);
// Back to zero for the claim value. // Back to zero for the claim value.
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
_g1->workers()->active_workers(), _g1->workers()->active_workers(),
HeapRegion::InitialClaimValue); HeapRegion::InitialClaimValue);
jint regions_added = parKnownGarbageCl.marked_regions_added(); jint regions_added = parKnownGarbageCl.marked_regions_added();
_hrSorted->incNumMarkedHeapRegions(regions_added); _hrSorted->incNumMarkedHeapRegions(regions_added);
if (G1PrintParCleanupStats) { if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.", gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
i, parKnownGarbageCl.invokes(), regions_added); worker_id, parKnownGarbageCl.invokes(), regions_added);
} }
} }
}; };
@@ -2436,10 +2457,45 @@ void G1CollectorPolicy::start_incremental_cset_building() {
_inc_cset_max_finger = 0; _inc_cset_max_finger = 0;
_inc_cset_recorded_rs_lengths = 0; _inc_cset_recorded_rs_lengths = 0;
_inc_cset_predicted_elapsed_time_ms = 0; _inc_cset_recorded_rs_lengths_diffs = 0;
_inc_cset_predicted_elapsed_time_ms = 0.0;
_inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
_inc_cset_build_state = Active; _inc_cset_build_state = Active;
} }
void G1CollectorPolicy::finalize_incremental_cset_building() {
assert(_inc_cset_build_state == Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
// The two "main" fields, _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms, are updated by the thread
// that adds a new region to the CSet. Further updates by the
// concurrent refinement thread that samples the young RSet lengths
// are accumulated in the *_diffs fields. Here we add the diffs to
// the "main" fields.
if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
_inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
} else {
// This is defensive. The diff should in theory be always positive
// as RSets can only grow between GCs. However, given that we
// sample their size concurrently with other threads updating them
// it's possible that we might get the wrong size back, which
// could make the calculations somewhat inaccurate.
size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
if (_inc_cset_recorded_rs_lengths >= diffs) {
_inc_cset_recorded_rs_lengths -= diffs;
} else {
_inc_cset_recorded_rs_lengths = 0;
}
}
_inc_cset_predicted_elapsed_time_ms +=
_inc_cset_predicted_elapsed_time_ms_diffs;
_inc_cset_recorded_rs_lengths_diffs = 0;
_inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}
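A minimal sketch of the fold step in finalize_incremental_cset_building() above, assuming only an unsigned running total and a signed accumulated diff; the clamp mirrors the defensive branch.

#include <cassert>
#include <cstddef>

// long stands in for ssize_t so the sketch stays portable.
static size_t fold_diff(size_t total, long diff) {
  if (diff >= 0) {
    return total + (size_t) diff;
  }
  size_t neg = (size_t) -diff;
  return (total >= neg) ? total - neg : 0;  // clamp rather than wrap around
}

int main() {
  assert(fold_diff(100, 25) == 125);  // common case: RSets grew between GCs
  assert(fold_diff(10, -30) == 0);    // concurrent sampling raced: clamp to 0
  return 0;
}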
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
// This routine is used when: // This routine is used when:
// * adding survivor regions to the incremental cset at the end of an // * adding survivor regions to the incremental cset at the end of an
@@ -2455,10 +2511,8 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true); double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
size_t used_bytes = hr->used(); size_t used_bytes = hr->used();
_inc_cset_recorded_rs_lengths += rs_length; _inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
_inc_cset_bytes_used_before += used_bytes; _inc_cset_bytes_used_before += used_bytes;
// Cache the values we have added to the aggregated information // Cache the values we have added to the aggregated information
@@ -2469,37 +2523,33 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
} }
void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) { void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
// This routine is currently only called as part of the updating of size_t new_rs_length) {
// existing policy information for regions in the incremental cset that // Update the CSet information that is dependent on the new RS length
// is performed by the concurrent refine thread(s) as part of young list
// RSet sampling. Therefore we should not be at a safepoint.
assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
assert(hr->is_young(), "it should be");
size_t used_bytes = hr->used();
size_t old_rs_length = hr->recorded_rs_length();
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
// Subtract the old recorded/predicted policy information for
// the given heap region from the collection set info.
_inc_cset_recorded_rs_lengths -= old_rs_length;
_inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
_inc_cset_bytes_used_before -= used_bytes;
// Clear the values cached in the heap region
hr->set_recorded_rs_length(0);
hr->set_predicted_elapsed_time_ms(0);
}
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
// Update the collection set information that is dependent on the new RS length
assert(hr->is_young(), "Precondition"); assert(hr->is_young(), "Precondition");
assert(!SafepointSynchronize::is_at_safepoint(),
"should not be at a safepoint");
remove_from_incremental_cset_info(hr); // We could have updated _inc_cset_recorded_rs_lengths and
add_to_incremental_cset_info(hr, new_rs_length); // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
// that atomically, as this code is executed by a concurrent
// refinement thread, potentially concurrently with a mutator thread
// allocating a new region and also updating the same fields. To
// avoid the atomic operations we accumulate these updates on two
// separate fields (*_diffs) and we'll just add them to the "main"
// fields at the start of a GC.
ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
_inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
hr->set_recorded_rs_length(new_rs_length);
hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
} }
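The comment above describes the producer side of that pattern: the refinement thread writes only the *_diffs fields it owns, so no atomics are needed. A hedged sketch with shortened, hypothetical field names (not HotSpot types):

#include <cassert>
#include <cstddef>

struct RegionSample {           // hypothetical stand-in, not the commit's code
  size_t recorded_rs_length;    // value cached on the region
  long   rs_lengths_diffs;      // deltas accumulated until the next GC
};

static void sample(RegionSample* s, size_t new_rs_length) {
  s->rs_lengths_diffs += (long) new_rs_length - (long) s->recorded_rs_length;
  s->recorded_rs_length = new_rs_length;  // refresh the per-region cache
}

int main() {
  RegionSample s = { 40, 0 };
  sample(&s, 55);               // RSet grew by 15
  sample(&s, 50);               // sampled mid-coarsening: apparent shrink by 5
  assert(s.rs_lengths_diffs == 10 && s.recorded_rs_length == 50);
  return 0;
}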
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
@@ -2591,6 +2641,7 @@ void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
double non_young_start_time_sec = os::elapsedTime(); double non_young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list(); YoungList* young_list = _g1->young_list();
finalize_incremental_cset_building();
guarantee(target_pause_time_ms > 0.0, guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive", err_msg("target_pause_time_ms = %1.6lf should be positive",
View File
@@ -83,6 +83,72 @@ public:
virtual MainBodySummary* main_body_summary() { return this; } virtual MainBodySummary* main_body_summary() { return this; }
}; };
// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between
// G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
// heap size. This means that every time the heap size changes the
// limits for the young gen size will be updated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1DefaultMaxNewGenPercent
// of the heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1DefaultMinNewGenPercent
// of the heap as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj {
private:
enum SizerKind {
SizerDefaults,
SizerNewSizeOnly,
SizerMaxNewSizeOnly,
SizerMaxAndNewSize,
SizerNewRatio
};
SizerKind _sizer_kind;
size_t _min_desired_young_length;
size_t _max_desired_young_length;
bool _adaptive_size;
size_t calculate_default_min_length(size_t new_number_of_heap_regions);
size_t calculate_default_max_length(size_t new_number_of_heap_regions);
public:
G1YoungGenSizer();
void heap_size_changed(size_t new_number_of_heap_regions);
size_t min_desired_young_length() {
return _min_desired_young_length;
}
size_t max_desired_young_length() {
return _max_desired_young_length;
}
bool adaptive_young_list_length() {
return _adaptive_size;
}
};
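The comment block above enumerates five flag combinations; as a hedged cross-check (illustrative, not the commit's code), the same decision table as a standalone function:

#include <cassert>

enum SizerKind { SizerDefaults, SizerNewSizeOnly, SizerMaxNewSizeOnly,
                 SizerMaxAndNewSize, SizerNewRatio };

static SizerKind pick_sizer(bool new_size, bool max_new_size, bool new_ratio) {
  if (new_ratio && !new_size && !max_new_size) return SizerNewRatio;
  // NewRatio combined with NewSize/MaxNewSize is ignored (with a warning).
  if (new_size && max_new_size) return SizerMaxAndNewSize;
  if (new_size)                 return SizerNewSizeOnly;
  if (max_new_size)             return SizerMaxNewSizeOnly;
  return SizerDefaults;  // bounds follow G1Default{Min,Max}NewGenPercent
}

int main() {
  assert(pick_sizer(false, false, false) == SizerDefaults);
  assert(pick_sizer(true,  false, true ) == SizerNewSizeOnly);  // ratio ignored
  assert(pick_sizer(false, false, true ) == SizerNewRatio);
  return 0;
}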
class G1CollectorPolicy: public CollectorPolicy { class G1CollectorPolicy: public CollectorPolicy {
private: private:
// either equal to the number of parallel threads, if ParallelGCThreads // either equal to the number of parallel threads, if ParallelGCThreads
@@ -167,9 +233,6 @@ private:
// indicates whether we are in young or mixed GC mode // indicates whether we are in young or mixed GC mode
bool _gcs_are_young; bool _gcs_are_young;
// if true, then it tries to dynamically adjust the length of the
// young list
bool _adaptive_young_list_length;
size_t _young_list_target_length; size_t _young_list_target_length;
size_t _young_list_fixed_length; size_t _young_list_fixed_length;
size_t _prev_eden_capacity; // used for logging size_t _prev_eden_capacity; // used for logging
@@ -227,9 +290,7 @@ private:
TruncatedSeq* _young_gc_eff_seq; TruncatedSeq* _young_gc_eff_seq;
bool _using_new_ratio_calculations; G1YoungGenSizer* _young_gen_sizer;
size_t _min_desired_young_length; // as set on the command line or default calculations
size_t _max_desired_young_length; // as set on the command line or default calculations
size_t _eden_cset_region_length; size_t _eden_cset_region_length;
size_t _survivor_cset_region_length; size_t _survivor_cset_region_length;
@@ -588,16 +649,29 @@ private:
// Used to record the highest end of heap region in collection set // Used to record the highest end of heap region in collection set
HeapWord* _inc_cset_max_finger; HeapWord* _inc_cset_max_finger;
// The RSet lengths recorded for regions in the collection set // The RSet lengths recorded for regions in the CSet. It is updated
// (updated by the periodic sampling of the regions in the // by the thread that adds a new region to the CSet. We assume that
// young list/collection set). // only one thread can be allocating a new CSet region (currently,
// it does so after taking the Heap_lock) hence no need to
// synchronize updates to this field.
size_t _inc_cset_recorded_rs_lengths; size_t _inc_cset_recorded_rs_lengths;
// The predicted elapsed time it will take to collect the regions // A concurrent refinement thread periodically samples the young
// in the collection set (updated by the periodic sampling of the // region RSets and needs to update _inc_cset_recorded_rs_lengths as
// regions in the young list/collection set). // the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add it to
// _inc_cset_recorded_rs_lengths at the start of a GC.
ssize_t _inc_cset_recorded_rs_lengths_diffs;
// The predicted elapsed time it will take to collect the regions in
// the CSet. This is updated by the thread that adds a new region to
// the CSet. See the comment for _inc_cset_recorded_rs_lengths about
// MT-safety assumptions.
double _inc_cset_predicted_elapsed_time_ms; double _inc_cset_predicted_elapsed_time_ms;
// See the comment for _inc_cset_recorded_rs_lengths_diffs.
double _inc_cset_predicted_elapsed_time_ms_diffs;
// Stash a pointer to the g1 heap. // Stash a pointer to the g1 heap.
G1CollectedHeap* _g1; G1CollectedHeap* _g1;
@@ -682,8 +756,6 @@ private:
// Count the number of bytes used in the CS. // Count the number of bytes used in the CS.
void count_CS_bytes_used(); void count_CS_bytes_used();
void update_young_list_size_using_newratio(size_t number_of_heap_regions);
public: public:
G1CollectorPolicy(); G1CollectorPolicy();
@@ -710,8 +782,6 @@ public:
// This should be called after the heap is resized. // This should be called after the heap is resized.
void record_new_heap_size(size_t new_number_of_regions); void record_new_heap_size(size_t new_number_of_regions);
public:
void init(); void init();
// Create jstat counters for the policy. // Create jstat counters for the policy.
@@ -894,6 +964,10 @@ public:
// Initialize incremental collection set info. // Initialize incremental collection set info.
void start_incremental_cset_building(); void start_incremental_cset_building();
// Perform any final calculations on the incremental CSet fields
// before we can use them.
void finalize_incremental_cset_building();
void clear_incremental_cset() { void clear_incremental_cset() {
_inc_cset_head = NULL; _inc_cset_head = NULL;
_inc_cset_tail = NULL; _inc_cset_tail = NULL;
@@ -902,10 +976,9 @@ public:
// Stop adding regions to the incremental collection set // Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; } void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
// Add/remove information about hr to the aggregated information // Add information about hr to the aggregated information for the
// for the incrementally built collection set. // incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length); void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
void remove_from_incremental_cset_info(HeapRegion* hr);
// Update information about hr in the aggregated information for // Update information about hr in the aggregated information for
// the incrementally built collection set. // the incrementally built collection set.
@@ -998,10 +1071,7 @@ public:
} }
bool adaptive_young_list_length() { bool adaptive_young_list_length() {
return _adaptive_young_list_length; return _young_gen_sizer->adaptive_young_list_length();
}
void set_adaptive_young_list_length(bool adaptive_young_list_length) {
_adaptive_young_list_length = adaptive_young_list_length;
} }
inline double get_gc_eff_factor() { inline double get_gc_eff_factor() {
View File
@@ -558,11 +558,11 @@ void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
} }
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm, void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) { uint worker_num, int claim_val) {
ScrubRSClosure scrub_cl(region_bm, card_bm); ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_par_iterate_chunked(&scrub_cl, _g1->heap_region_par_iterate_chunked(&scrub_cl,
worker_num, worker_num,
(int) n_workers(), n_workers(),
claim_val); claim_val);
} }
View File
@@ -40,7 +40,7 @@ class G1RemSet: public CHeapObj {
protected: protected:
G1CollectedHeap* _g1; G1CollectedHeap* _g1;
unsigned _conc_refine_cards; unsigned _conc_refine_cards;
size_t n_workers(); uint n_workers();
protected: protected:
enum SomePrivateConstants { enum SomePrivateConstants {
@@ -122,7 +122,7 @@ public:
// parallel thread id of the current thread, and "claim_val" is the // parallel thread id of the current thread, and "claim_val" is the
// value that should be used to claim heap regions. // value that should be used to claim heap regions.
void scrub_par(BitMap* region_bm, BitMap* card_bm, void scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val); uint worker_num, int claim_val);
// Refine the card corresponding to "card_ptr". If "sts" is non-NULL, // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
// join and leave around parts that must be atomic wrt GC. (NULL means // join and leave around parts that must be atomic wrt GC. (NULL means
View File
@@ -29,7 +29,7 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
inline size_t G1RemSet::n_workers() { inline uint G1RemSet::n_workers() {
if (_g1->workers() != NULL) { if (_g1->workers() != NULL) {
return _g1->workers()->total_workers(); return _g1->workers()->total_workers();
} else { } else {
View File
@@ -289,7 +289,15 @@
\ \
develop(uintx, G1ConcMarkForceOverflow, 0, \ develop(uintx, G1ConcMarkForceOverflow, 0, \
"The number of times we'll force an overflow during " \ "The number of times we'll force an overflow during " \
"concurrent marking") "concurrent marking") \
\
develop(uintx, G1DefaultMinNewGenPercent, 20, \
"Percentage (0-100) of the heap size to use as minimum " \
"young gen size.") \
\
develop(uintx, G1DefaultMaxNewGenPercent, 50, \
"Percentage (0-100) of the heap size to use as maximum " \
"young gen size.")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
View File
@@ -94,7 +94,8 @@ public:
#endif // PRODUCT #endif // PRODUCT
} }
template <class T> void do_oop_work(T* p) { template <class T>
void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition"); assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
"Precondition"); "Precondition");
@@ -102,8 +103,10 @@ public:
if (!oopDesc::is_null(heap_oop)) { if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
bool failed = false; bool failed = false;
if (!_g1h->is_in_closed_subset(obj) || if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
_g1h->is_obj_dead_cond(obj, _vo)) { MutexLockerEx x(ParGCRareEvent_lock,
Mutex::_no_safepoint_check_flag);
if (!_failures) { if (!_failures) {
gclog_or_tty->print_cr(""); gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
@@ -133,6 +136,7 @@ public:
print_object(gclog_or_tty, obj); print_object(gclog_or_tty, obj);
} }
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
gclog_or_tty->flush();
_failures = true; _failures = true;
failed = true; failed = true;
_n_failures++; _n_failures++;
@@ -155,6 +159,9 @@ public:
cv_field == dirty cv_field == dirty
: cv_obj == dirty || cv_field == dirty)); : cv_obj == dirty || cv_field == dirty));
if (is_bad) { if (is_bad) {
MutexLockerEx x(ParGCRareEvent_lock,
Mutex::_no_safepoint_check_flag);
if (!_failures) { if (!_failures) {
gclog_or_tty->print_cr(""); gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
@@ -174,6 +181,7 @@ public:
gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
cv_obj, cv_field); cv_obj, cv_field);
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
gclog_or_tty->flush();
_failures = true; _failures = true;
if (!failed) _n_failures++; if (!failed) _n_failures++;
} }
View File
@@ -56,14 +56,14 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
lowest_non_clean_base_chunk_index, lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size); lowest_non_clean_chunk_size);
int n_strides = n_threads * ParGCStridesPerThread; uint n_strides = n_threads * ParGCStridesPerThread;
SequentialSubTasksDone* pst = sp->par_seq_tasks(); SequentialSubTasksDone* pst = sp->par_seq_tasks();
// Sets the condition for completion of the subtask (how many threads // Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done). // need to finish in order to be done).
pst->set_n_threads(n_threads); pst->set_n_threads(n_threads);
pst->set_n_tasks(n_strides); pst->set_n_tasks(n_strides);
int stride = 0; uint stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) { while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, cl, ct, process_stride(sp, mr, stride, n_strides, cl, ct,
lowest_non_clean, lowest_non_clean,
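A hedged sketch of the stride-claiming loop above: a plain atomic counter stands in for SequentialSubTasksDone (which uses a cmpxchg loop), and each worker keeps claiming stride indices first-come-first-served until all n_threads * ParGCStridesPerThread strides are taken.

#include <atomic>
#include <cstdio>

static std::atomic<unsigned> n_claimed(0);   // shared claim counter

static bool claim_next(unsigned n_strides, unsigned* stride) {
  unsigned t = n_claimed.fetch_add(1);       // simplified claim step
  if (t >= n_strides) return false;          // every stride already taken
  *stride = t;
  return true;
}

int main() {
  const unsigned n_threads = 4, strides_per_thread = 2;  // assumed values
  const unsigned n_strides = n_threads * strides_per_thread;
  unsigned stride;
  while (claim_next(n_strides, &stride)) {   // each worker runs this loop
    std::printf("processing stride %u of %u\n", stride, n_strides);
  }
  return 0;
}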
View File
@@ -590,7 +590,7 @@ void ParNewGenTask::set_for_termination(int active_workers) {
// called after a task is started. So "i" is based on // called after a task is started. So "i" is based on
// first-come-first-served. // first-come-first-served.
void ParNewGenTask::work(int i) { void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource // Since this is being done in a separate thread, need new resource
// and handle marks. // and handle marks.
@@ -601,8 +601,8 @@ void ParNewGenTask::work(int i) {
Generation* old_gen = gch->next_gen(_gen); Generation* old_gen = gch->next_gen(_gen);
ParScanThreadState& par_scan_state = _state_set->thread_state(i); ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
assert(_state_set->is_valid(i), "Should not have been called"); assert(_state_set->is_valid(worker_id), "Should not have been called");
par_scan_state.set_young_old_boundary(_young_old_boundary); par_scan_state.set_young_old_boundary(_young_old_boundary);
@@ -755,7 +755,7 @@ public:
ParScanThreadStateSet& state_set); ParScanThreadStateSet& state_set);
private: private:
virtual void work(int i); virtual void work(uint worker_id);
virtual void set_for_termination(int active_workers) { virtual void set_for_termination(int active_workers) {
_state_set.terminator()->reset_for_reuse(active_workers); _state_set.terminator()->reset_for_reuse(active_workers);
} }
@@ -781,13 +781,13 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
{ {
} }
void ParNewRefProcTaskProxy::work(int i) void ParNewRefProcTaskProxy::work(uint worker_id)
{ {
ResourceMark rm; ResourceMark rm;
HandleMark hm; HandleMark hm;
ParScanThreadState& par_scan_state = _state_set.thread_state(i); ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
par_scan_state.set_young_old_boundary(_young_old_boundary); par_scan_state.set_young_old_boundary(_young_old_boundary);
_task.work(i, par_scan_state.is_alive_closure(), _task.work(worker_id, par_scan_state.is_alive_closure(),
par_scan_state.keep_alive_closure(), par_scan_state.keep_alive_closure(),
par_scan_state.evacuate_followers_closure()); par_scan_state.evacuate_followers_closure());
} }
@@ -802,9 +802,9 @@ public:
_task(task) _task(task)
{ } { }
virtual void work(int i) virtual void work(uint worker_id)
{ {
_task.work(i); _task.work(worker_id);
} }
}; };
View File
@@ -239,7 +239,7 @@ public:
HeapWord* young_old_boundary() { return _young_old_boundary; } HeapWord* young_old_boundary() { return _young_old_boundary; }
void work(int i); void work(uint worker_id);
// Reset the terminator in ParScanThreadStateSet for // Reset the terminator in ParScanThreadStateSet for
// "active_workers" threads. // "active_workers" threads.
View File
@@ -69,7 +69,7 @@ class CollectedHeap : public CHeapObj {
MemRegion _reserved; MemRegion _reserved;
BarrierSet* _barrier_set; BarrierSet* _barrier_set;
bool _is_gc_active; bool _is_gc_active;
int _n_par_threads; uint _n_par_threads;
unsigned int _total_collections; // ... started unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started unsigned int _total_full_collections; // ... started
@@ -309,10 +309,10 @@ class CollectedHeap : public CHeapObj {
GCCause::Cause gc_cause() { return _gc_cause; } GCCause::Cause gc_cause() { return _gc_cause; }
// Number of threads currently working on GC tasks. // Number of threads currently working on GC tasks.
int n_par_threads() { return _n_par_threads; } uint n_par_threads() { return _n_par_threads; }
// May be overridden to set additional parallelism. // May be overridden to set additional parallelism.
virtual void set_par_threads(int t) { _n_par_threads = t; }; virtual void set_par_threads(uint t) { _n_par_threads = t; };
// Preload classes into the shared portion of the heap, and then dump // Preload classes into the shared portion of the heap, and then dump
// that data to a file so that it can be loaded directly by another // that data to a file so that it can be loaded directly by another
View File
@@ -703,7 +703,7 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
return collector_policy()->satisfy_failed_allocation(size, is_tlab); return collector_policy()->satisfy_failed_allocation(size, is_tlab);
} }
void GenCollectedHeap::set_par_threads(int t) { void GenCollectedHeap::set_par_threads(uint t) {
SharedHeap::set_par_threads(t); SharedHeap::set_par_threads(t);
_gen_process_strong_tasks->set_n_threads(t); _gen_process_strong_tasks->set_n_threads(t);
} }
View File
@@ -419,8 +419,7 @@ public:
// asserted to be this type. // asserted to be this type.
static GenCollectedHeap* heap(); static GenCollectedHeap* heap();
void set_par_threads(int t); void set_par_threads(uint t);
// Invoke the "do_oop" method of one of the closures "not_older_gens" // Invoke the "do_oop" method of one of the closures "not_older_gens"
// or "older_gens" on root locations for the generation at // or "older_gens" on root locations for the generation at
View File
@@ -88,9 +88,9 @@ void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_re
ReferenceProcessor::ReferenceProcessor(MemRegion span, ReferenceProcessor::ReferenceProcessor(MemRegion span,
bool mt_processing, bool mt_processing,
int mt_processing_degree, uint mt_processing_degree,
bool mt_discovery, bool mt_discovery,
int mt_discovery_degree, uint mt_discovery_degree,
bool atomic_discovery, bool atomic_discovery,
BoolObjectClosure* is_alive_non_header, BoolObjectClosure* is_alive_non_header,
bool discovered_list_needs_barrier) : bool discovered_list_needs_barrier) :
@@ -105,7 +105,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_span = span; _span = span;
_discovery_is_atomic = atomic_discovery; _discovery_is_atomic = atomic_discovery;
_discovery_is_mt = mt_discovery; _discovery_is_mt = mt_discovery;
_num_q = MAX2(1, mt_processing_degree); _num_q = MAX2(1U, mt_processing_degree);
_max_num_q = MAX2(_num_q, mt_discovery_degree); _max_num_q = MAX2(_num_q, mt_discovery_degree);
_discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,
_max_num_q * number_of_subclasses_of_ref()); _max_num_q * number_of_subclasses_of_ref());
@@ -118,7 +118,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
// Initialize all entries to NULL // Initialize all entries to NULL
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
_discovered_refs[i].set_head(NULL); _discovered_refs[i].set_head(NULL);
_discovered_refs[i].set_length(0); _discovered_refs[i].set_length(0);
} }
@@ -133,7 +133,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
#ifndef PRODUCT #ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() { void ReferenceProcessor::verify_no_references_recorded() {
guarantee(!_discovering_refs, "Discovering refs?"); guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
guarantee(_discovered_refs[i].is_empty(), guarantee(_discovered_refs[i].is_empty(),
"Found non-empty discovered list"); "Found non-empty discovered list");
} }
@@ -141,7 +141,7 @@ void ReferenceProcessor::verify_no_references_recorded() {
#endif #endif
void ReferenceProcessor::weak_oops_do(OopClosure* f) { void ReferenceProcessor::weak_oops_do(OopClosure* f) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (UseCompressedOops) { if (UseCompressedOops) {
f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
} else { } else {
@@ -437,7 +437,7 @@ void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr
task_executor->execute(tsk); task_executor->execute(tsk);
} else { } else {
// Serial code: call the parent class's implementation // Serial code: call the parent class's implementation
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr); enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
_discovered_refs[i].set_head(NULL); _discovered_refs[i].set_head(NULL);
_discovered_refs[i].set_length(0); _discovered_refs[i].set_length(0);
@@ -696,7 +696,7 @@ ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
void ReferenceProcessor::abandon_partial_discovery() { void ReferenceProcessor::abandon_partial_discovery() {
// loop over the lists // loop over the lists
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i)); gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
} }
@@ -787,7 +787,7 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
gclog_or_tty->print_cr("\nBalance ref_lists "); gclog_or_tty->print_cr("\nBalance ref_lists ");
} }
for (int i = 0; i < _max_num_q; ++i) { for (uint i = 0; i < _max_num_q; ++i) {
total_refs += ref_lists[i].length(); total_refs += ref_lists[i].length();
if (TraceReferenceGC && PrintGCDetails) { if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print("%d ", ref_lists[i].length()); gclog_or_tty->print("%d ", ref_lists[i].length());
@@ -797,8 +797,8 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
gclog_or_tty->print_cr(" = %d", total_refs); gclog_or_tty->print_cr(" = %d", total_refs);
} }
size_t avg_refs = total_refs / _num_q + 1; size_t avg_refs = total_refs / _num_q + 1;
int to_idx = 0; uint to_idx = 0;
for (int from_idx = 0; from_idx < _max_num_q; from_idx++) { for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
bool move_all = false; bool move_all = false;
if (from_idx >= _num_q) { if (from_idx >= _num_q) {
move_all = ref_lists[from_idx].length() > 0; move_all = ref_lists[from_idx].length() > 0;
@@ -857,7 +857,7 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
} }
#ifdef ASSERT #ifdef ASSERT
size_t balanced_total_refs = 0; size_t balanced_total_refs = 0;
for (int i = 0; i < _max_num_q; ++i) { for (uint i = 0; i < _max_num_q; ++i) {
balanced_total_refs += ref_lists[i].length(); balanced_total_refs += ref_lists[i].length();
if (TraceReferenceGC && PrintGCDetails) { if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print("%d ", ref_lists[i].length()); gclog_or_tty->print("%d ", ref_lists[i].length());
@@ -903,7 +903,7 @@ ReferenceProcessor::process_discovered_reflist(
} }
if (PrintReferenceGC && PrintGCDetails) { if (PrintReferenceGC && PrintGCDetails) {
size_t total = 0; size_t total = 0;
for (int i = 0; i < _max_num_q; ++i) { for (uint i = 0; i < _max_num_q; ++i) {
total += refs_lists[i].length(); total += refs_lists[i].length();
} }
gclog_or_tty->print(", %u refs", total); gclog_or_tty->print(", %u refs", total);
@@ -919,7 +919,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
task_executor->execute(phase1); task_executor->execute(phase1);
} else { } else {
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
process_phase1(refs_lists[i], policy, process_phase1(refs_lists[i], policy,
is_alive, keep_alive, complete_gc); is_alive, keep_alive, complete_gc);
} }
@@ -935,7 +935,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
task_executor->execute(phase2); task_executor->execute(phase2);
} else { } else {
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
} }
} }
@@ -946,7 +946,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
task_executor->execute(phase3); task_executor->execute(phase3);
} else { } else {
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
process_phase3(refs_lists[i], clear_referent, process_phase3(refs_lists[i], clear_referent,
is_alive, keep_alive, complete_gc); is_alive, keep_alive, complete_gc);
} }
@@ -955,7 +955,7 @@ ReferenceProcessor::process_discovered_reflist(
void ReferenceProcessor::clean_up_discovered_references() { void ReferenceProcessor::clean_up_discovered_references() {
// loop over the lists // loop over the lists
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr( gclog_or_tty->print_cr(
"\nScrubbing %s discovered list of Null referents", "\nScrubbing %s discovered list of Null referents",
@@ -1000,7 +1000,7 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list)
} }
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
int id = 0; uint id = 0;
// Determine the queue index to use for this object. // Determine the queue index to use for this object.
if (_discovery_is_mt) { if (_discovery_is_mt) {
// During a multi-threaded discovery phase, // During a multi-threaded discovery phase,
@@ -1282,7 +1282,7 @@ void ReferenceProcessor::preclean_discovered_references(
{ {
TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty); false, gclog_or_tty);
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
} }
@@ -1295,7 +1295,7 @@ void ReferenceProcessor::preclean_discovered_references(
{ {
TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty); false, gclog_or_tty);
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
} }
@@ -1308,7 +1308,7 @@ void ReferenceProcessor::preclean_discovered_references(
{ {
TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty); false, gclog_or_tty);
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
} }
@@ -1321,7 +1321,7 @@ void ReferenceProcessor::preclean_discovered_references(
{ {
TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty); false, gclog_or_tty);
for (int i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
} }
@@ -1386,7 +1386,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
) )
} }
const char* ReferenceProcessor::list_name(int i) { const char* ReferenceProcessor::list_name(uint i) {
assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(), assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
"Out of bounds index"); "Out of bounds index");
@@ -1410,7 +1410,7 @@ void ReferenceProcessor::verify_ok_to_handle_reflists() {
#ifndef PRODUCT #ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() { void ReferenceProcessor::clear_discovered_references() {
guarantee(!_discovering_refs, "Discovering refs?"); guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
clear_discovered_references(_discovered_refs[i]); clear_discovered_references(_discovered_refs[i]);
} }
} }
View File
@@ -231,7 +231,7 @@ class ReferenceProcessor : public CHeapObj {
bool _enqueuing_is_done; // true if all weak references enqueued bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when bool _processing_is_mt; // true during phases when
// reference processing is MT. // reference processing is MT.
int _next_id; // round-robin mod _num_q counter in uint _next_id; // round-robin mod _num_q counter in
// support of work distribution // support of work distribution
// For collectors that do not keep GC liveness information // For collectors that do not keep GC liveness information
@@ -252,9 +252,9 @@ class ReferenceProcessor : public CHeapObj {
// The discovered ref lists themselves // The discovered ref lists themselves
// The active MT'ness degree of the queues below // The active MT'ness degree of the queues below
int _num_q; uint _num_q;
// The maximum MT'ness degree of the queues below // The maximum MT'ness degree of the queues below
int _max_num_q; uint _max_num_q;
// Master array of discovered oops // Master array of discovered oops
DiscoveredList* _discovered_refs; DiscoveredList* _discovered_refs;
@@ -268,9 +268,9 @@ class ReferenceProcessor : public CHeapObj {
public: public:
static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); } static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
int num_q() { return _num_q; } uint num_q() { return _num_q; }
int max_num_q() { return _max_num_q; } uint max_num_q() { return _max_num_q; }
void set_active_mt_degree(int v) { _num_q = v; } void set_active_mt_degree(uint v) { _num_q = v; }
DiscoveredList* discovered_refs() { return _discovered_refs; } DiscoveredList* discovered_refs() { return _discovered_refs; }
@@ -368,7 +368,7 @@ class ReferenceProcessor : public CHeapObj {
// Returns the name of the discovered reference list // Returns the name of the discovered reference list
// occupying the i / _num_q slot. // occupying the i / _num_q slot.
const char* list_name(int i); const char* list_name(uint i);
void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
@@ -388,8 +388,8 @@ class ReferenceProcessor : public CHeapObj {
YieldClosure* yield); YieldClosure* yield);
// round-robin mod _num_q (note: _not_ mod _max_num_q) // round-robin mod _num_q (note: _not_ mod _max_num_q)
int next_id() { uint next_id() {
int id = _next_id; uint id = _next_id;
if (++_next_id == _num_q) { if (++_next_id == _num_q) {
_next_id = 0; _next_id = 0;
} }
@@ -434,8 +434,8 @@ class ReferenceProcessor : public CHeapObj {
// Default parameters give you a vanilla reference processor. // Default parameters give you a vanilla reference processor.
ReferenceProcessor(MemRegion span, ReferenceProcessor(MemRegion span,
bool mt_processing = false, int mt_processing_degree = 1, bool mt_processing = false, uint mt_processing_degree = 1,
bool mt_discovery = false, int mt_discovery_degree = 1, bool mt_discovery = false, uint mt_discovery_degree = 1,
bool atomic_discovery = true, bool atomic_discovery = true,
BoolObjectClosure* is_alive_non_header = NULL, BoolObjectClosure* is_alive_non_header = NULL,
bool discovered_list_needs_barrier = false); bool discovered_list_needs_barrier = false);
View File
@@ -94,7 +94,7 @@ bool SharedHeap::heap_lock_held_for_gc() {
&& _thread_holds_heap_lock_for_gc); && _thread_holds_heap_lock_for_gc);
} }
void SharedHeap::set_par_threads(int t) { void SharedHeap::set_par_threads(uint t) {
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads"); assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
_n_par_threads = t; _n_par_threads = t;
_process_strong_tasks->set_n_threads(t); _process_strong_tasks->set_n_threads(t);
View File
@@ -287,7 +287,7 @@ public:
// Sets the number of parallel threads that will be doing tasks // Sets the number of parallel threads that will be doing tasks
// (such as process strong roots) subsequently. // (such as process strong roots) subsequently.
virtual void set_par_threads(int t); virtual void set_par_threads(uint t);
int n_termination(); int n_termination();
void set_n_termination(int t); void set_n_termination(int t);
View File
@@ -1553,7 +1553,7 @@ class CommandLineFlags {
product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
"The desired number of objects to claim from the overflow list") \ "The desired number of objects to claim from the overflow list") \
\ \
diagnostic(intx, ParGCStridesPerThread, 2, \ diagnostic(uintx, ParGCStridesPerThread, 2, \
"The number of strides per worker thread that we divide up the " \ "The number of strides per worker thread that we divide up the " \
"card table scanning work into") \ "card table scanning work into") \
\ \
View File
@@ -53,14 +53,14 @@ AbstractWorkGang::AbstractWorkGang(const char* name,
} }
WorkGang::WorkGang(const char* name, WorkGang::WorkGang(const char* name,
int workers, uint workers,
bool are_GC_task_threads, bool are_GC_task_threads,
bool are_ConcurrentGC_threads) : bool are_ConcurrentGC_threads) :
AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) { AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
_total_workers = workers; _total_workers = workers;
} }
GangWorker* WorkGang::allocate_worker(int which) { GangWorker* WorkGang::allocate_worker(uint which) {
GangWorker* new_worker = new GangWorker(this, which); GangWorker* new_worker = new GangWorker(this, which);
return new_worker; return new_worker;
} }
@@ -88,7 +88,7 @@ bool WorkGang::initialize_workers() {
} else { } else {
worker_type = os::pgc_thread; worker_type = os::pgc_thread;
} }
for (int worker = 0; worker < total_workers(); worker += 1) { for (uint worker = 0; worker < total_workers(); worker += 1) {
GangWorker* new_worker = allocate_worker(worker); GangWorker* new_worker = allocate_worker(worker);
assert(new_worker != NULL, "Failed to allocate GangWorker"); assert(new_worker != NULL, "Failed to allocate GangWorker");
_gang_workers[worker] = new_worker; _gang_workers[worker] = new_worker;
@@ -108,14 +108,14 @@ AbstractWorkGang::~AbstractWorkGang() {
tty->print_cr("Destructing work gang %s", name()); tty->print_cr("Destructing work gang %s", name());
} }
stop(); // stop all the workers stop(); // stop all the workers
for (int worker = 0; worker < total_workers(); worker += 1) { for (uint worker = 0; worker < total_workers(); worker += 1) {
delete gang_worker(worker); delete gang_worker(worker);
} }
delete gang_workers(); delete gang_workers();
delete monitor(); delete monitor();
} }
GangWorker* AbstractWorkGang::gang_worker(int i) const { GangWorker* AbstractWorkGang::gang_worker(uint i) const {
// Array index bounds checking. // Array index bounds checking.
GangWorker* result = NULL; GangWorker* result = NULL;
assert(gang_workers() != NULL, "No workers for indexing"); assert(gang_workers() != NULL, "No workers for indexing");
@@ -148,7 +148,7 @@ void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
// Tell the workers to get to work. // Tell the workers to get to work.
monitor()->notify_all(); monitor()->notify_all();
// Wait for them to be finished // Wait for them to be finished
while (finished_workers() < (int) no_of_parallel_workers) { while (finished_workers() < no_of_parallel_workers) {
if (TraceWorkGang) { if (TraceWorkGang) {
tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d", tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
name(), finished_workers(), no_of_parallel_workers, name(), finished_workers(), no_of_parallel_workers,
@@ -377,12 +377,12 @@ WorkGangBarrierSync::WorkGangBarrierSync()
_n_workers(0), _n_completed(0), _should_reset(false) { _n_workers(0), _n_completed(0), _should_reset(false) {
} }
WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name) WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
: _monitor(Mutex::safepoint, name, true), : _monitor(Mutex::safepoint, name, true),
_n_workers(n_workers), _n_completed(0), _should_reset(false) { _n_workers(n_workers), _n_completed(0), _should_reset(false) {
} }
void WorkGangBarrierSync::set_n_workers(int n_workers) { void WorkGangBarrierSync::set_n_workers(uint n_workers) {
_n_workers = n_workers; _n_workers = n_workers;
_n_completed = 0; _n_completed = 0;
_should_reset = false; _should_reset = false;
@ -419,9 +419,9 @@ void WorkGangBarrierSync::enter() {
// SubTasksDone functions. // SubTasksDone functions.
SubTasksDone::SubTasksDone(int n) : SubTasksDone::SubTasksDone(uint n) :
_n_tasks(n), _n_threads(1), _tasks(NULL) { _n_tasks(n), _n_threads(1), _tasks(NULL) {
_tasks = NEW_C_HEAP_ARRAY(jint, n); _tasks = NEW_C_HEAP_ARRAY(uint, n);
guarantee(_tasks != NULL, "alloc failure"); guarantee(_tasks != NULL, "alloc failure");
clear(); clear();
} }
@ -430,14 +430,14 @@ bool SubTasksDone::valid() {
return _tasks != NULL; return _tasks != NULL;
} }
void SubTasksDone::set_n_threads(int t) { void SubTasksDone::set_n_threads(uint t) {
assert(_claimed == 0 || _threads_completed == _n_threads, assert(_claimed == 0 || _threads_completed == _n_threads,
"should not be called while tasks are being processed!"); "should not be called while tasks are being processed!");
_n_threads = (t == 0 ? 1 : t); _n_threads = (t == 0 ? 1 : t);
} }
void SubTasksDone::clear() { void SubTasksDone::clear() {
for (int i = 0; i < _n_tasks; i++) { for (uint i = 0; i < _n_tasks; i++) {
_tasks[i] = 0; _tasks[i] = 0;
} }
_threads_completed = 0; _threads_completed = 0;
@ -446,9 +446,9 @@ void SubTasksDone::clear() {
#endif #endif
} }
bool SubTasksDone::is_task_claimed(int t) { bool SubTasksDone::is_task_claimed(uint t) {
assert(0 <= t && t < _n_tasks, "bad task id."); assert(t < _n_tasks, "bad task id.");
jint old = _tasks[t]; uint old = _tasks[t];
if (old == 0) { if (old == 0) {
old = Atomic::cmpxchg(1, &_tasks[t], 0); old = Atomic::cmpxchg(1, &_tasks[t], 0);
} }
@ -457,7 +457,7 @@ bool SubTasksDone::is_task_claimed(int t) {
#ifdef ASSERT #ifdef ASSERT
if (!res) { if (!res) {
assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?"); assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
Atomic::inc(&_claimed); Atomic::inc((volatile jint*) &_claimed);
} }
#endif #endif
return res; return res;
@ -471,7 +471,7 @@ void SubTasksDone::all_tasks_completed() {
observed = Atomic::cmpxchg(old+1, &_threads_completed, old); observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
} while (observed != old); } while (observed != old);
// If this was the last thread checking in, clear the tasks. // If this was the last thread checking in, clear the tasks.
if (observed+1 == _n_threads) clear(); if (observed+1 == (jint)_n_threads) clear();
} }
@ -490,12 +490,12 @@ bool SequentialSubTasksDone::valid() {
return _n_threads > 0; return _n_threads > 0;
} }
bool SequentialSubTasksDone::is_task_claimed(int& t) { bool SequentialSubTasksDone::is_task_claimed(uint& t) {
jint* n_claimed_ptr = &_n_claimed; uint* n_claimed_ptr = &_n_claimed;
t = *n_claimed_ptr; t = *n_claimed_ptr;
while (t < _n_tasks) { while (t < _n_tasks) {
jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t); uint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t);
if (res == t) { if (res == t) {
return false; return false;
} }
t = *n_claimed_ptr; t = *n_claimed_ptr;
@ -504,10 +504,10 @@ bool SequentialSubTasksDone::is_task_claimed(int& t) {
} }
bool SequentialSubTasksDone::all_tasks_completed() { bool SequentialSubTasksDone::all_tasks_completed() {
jint* n_completed_ptr = &_n_completed; uint* n_completed_ptr = &_n_completed;
jint complete = *n_completed_ptr; uint complete = *n_completed_ptr;
while (true) { while (true) {
jint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete); uint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
if (res == complete) { if (res == complete) {
break; break;
} }
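SequentialSubTasksDone hands out task indices in order: each caller CAS-increments _n_claimed, and the value it advanced from is the index it owns; the loop retries after a lost race and gives up once _n_tasks indices are gone. A small sketch of that loop with std::atomic (function and variable names assumed, not HotSpot's):

#include <atomic>
#include <cstdio>

// Returns false and sets t to the claimed index, or returns true
// when all n_tasks indices have been handed out.
bool claim_next(std::atomic<unsigned>& n_claimed, unsigned n_tasks,
                unsigned& t) {
  t = n_claimed.load();
  while (t < n_tasks) {
    // cf. Atomic::cmpxchg(t+1, n_claimed_ptr, t)
    if (n_claimed.compare_exchange_strong(t, t + 1)) {
      return false;            // we own index t
    }
    // on failure compare_exchange reloaded t; retry
  }
  return true;                 // nothing left to claim
}

int main() {
  std::atomic<unsigned> n_claimed{0};
  unsigned t;
  while (!claim_next(n_claimed, 4, t)) {
    std::printf("claimed task %u\n", t);   // prints 0,1,2,3 in order
  }
}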

View File

@ -68,7 +68,7 @@ class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
public: public:
// The abstract work method. // The abstract work method.
// The argument tells you which member of the gang you are. // The argument tells you which member of the gang you are.
virtual void work(int i) = 0; virtual void work(uint worker_id) = 0;
// This method configures the task for proper termination. // This method configures the task for proper termination.
// Some tasks do not have any requirements on termination // Some tasks do not have any requirements on termination
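Renaming the parameter from i to worker_id makes the contract explicit: each gang member is handed its own id in [0, active_workers) and typically uses it to index per-worker state such as its work queue. A self-contained sketch of that contract with a stand-in base class (GangTaskLike and PrintTask are invented names, not the HotSpot classes):

#include <cstdio>
#include <thread>
#include <vector>

typedef unsigned int uint;

class GangTaskLike {
 public:
  // Each gang member calls work() with its own id in [0, n_workers).
  virtual void work(uint worker_id) = 0;
  virtual ~GangTaskLike() {}
};

class PrintTask : public GangTaskLike {
 public:
  virtual void work(uint worker_id) {
    // A real task would use worker_id to pick per-worker state,
    // e.g. work_queue(worker_id) as CMSConcMarkingTask does.
    std::printf("hello from worker %u\n", worker_id);
  }
};

int main() {
  PrintTask task;
  std::vector<std::thread> gang;
  for (uint id = 0; id < 4; ++id) {
    gang.emplace_back([&task, id] { task.work(id); });
  }
  for (auto& t : gang) t.join();
}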
@ -149,7 +149,7 @@ protected:
// and notifies of changes in it. // and notifies of changes in it.
Monitor* _monitor; Monitor* _monitor;
// The count of the number of workers in the gang. // The count of the number of workers in the gang.
int _total_workers; uint _total_workers;
// Whether the workers should terminate. // Whether the workers should terminate.
bool _terminate; bool _terminate;
// The array of worker threads for this gang. // The array of worker threads for this gang.
@ -160,18 +160,18 @@ protected:
// A sequence number for the current task. // A sequence number for the current task.
int _sequence_number; int _sequence_number;
// The number of started workers. // The number of started workers.
int _started_workers; uint _started_workers;
// The number of finished workers. // The number of finished workers.
int _finished_workers; uint _finished_workers;
public: public:
// Accessors for fields // Accessors for fields
Monitor* monitor() const { Monitor* monitor() const {
return _monitor; return _monitor;
} }
int total_workers() const { uint total_workers() const {
return _total_workers; return _total_workers;
} }
virtual int active_workers() const { virtual uint active_workers() const {
return _total_workers; return _total_workers;
} }
bool terminate() const { bool terminate() const {
@ -186,10 +186,10 @@ public:
int sequence_number() const { int sequence_number() const {
return _sequence_number; return _sequence_number;
} }
int started_workers() const { uint started_workers() const {
return _started_workers; return _started_workers;
} }
int finished_workers() const { uint finished_workers() const {
return _finished_workers; return _finished_workers;
} }
bool are_GC_task_threads() const { bool are_GC_task_threads() const {
@ -203,7 +203,7 @@ public:
return (task() == NULL); return (task() == NULL);
} }
// Return the Ith gang worker. // Return the Ith gang worker.
GangWorker* gang_worker(int i) const; GangWorker* gang_worker(uint i) const;
void threads_do(ThreadClosure* tc) const; void threads_do(ThreadClosure* tc) const;
@ -255,13 +255,13 @@ public:
class WorkGang: public AbstractWorkGang { class WorkGang: public AbstractWorkGang {
public: public:
// Constructor // Constructor
WorkGang(const char* name, int workers, WorkGang(const char* name, uint workers,
bool are_GC_task_threads, bool are_ConcurrentGC_threads); bool are_GC_task_threads, bool are_ConcurrentGC_threads);
// Run a task, returns when the task is done (or terminated). // Run a task, returns when the task is done (or terminated).
virtual void run_task(AbstractGangTask* task); virtual void run_task(AbstractGangTask* task);
void run_task(AbstractGangTask* task, uint no_of_parallel_workers); void run_task(AbstractGangTask* task, uint no_of_parallel_workers);
// Allocate a worker and return a pointer to it. // Allocate a worker and return a pointer to it.
virtual GangWorker* allocate_worker(int which); virtual GangWorker* allocate_worker(uint which);
// Initialize workers in the gang. Return true if initialization // Initialize workers in the gang. Return true if initialization
// succeeded. The type of the worker can be overridden in a derived // succeeded. The type of the worker can be overridden in a derived
// class with the appropriate implementation of allocate_worker(). // class with the appropriate implementation of allocate_worker().
@ -323,25 +323,25 @@ class FlexibleWorkGang: public WorkGang {
// determine completion. // determine completion.
protected: protected:
int _active_workers; uint _active_workers;
public: public:
// Constructor and destructor. // Constructor and destructor.
// Initialize active_workers to a minimum value. Setting it to // Initialize active_workers to a minimum value. Setting it to
// the parameter "workers" will initialize it to a maximum // the parameter "workers" will initialize it to a maximum
// value which is not desirable. // value which is not desirable.
FlexibleWorkGang(const char* name, int workers, FlexibleWorkGang(const char* name, uint workers,
bool are_GC_task_threads, bool are_GC_task_threads,
bool are_ConcurrentGC_threads) : bool are_ConcurrentGC_threads) :
WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads), WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
_active_workers(UseDynamicNumberOfGCThreads ? 1 : ParallelGCThreads) {}; _active_workers(UseDynamicNumberOfGCThreads ? 1U : ParallelGCThreads) {}
// Accessors for fields // Accessors for fields
virtual int active_workers() const { return _active_workers; } virtual uint active_workers() const { return _active_workers; }
void set_active_workers(int v) { void set_active_workers(uint v) {
assert(v <= _total_workers, assert(v <= _total_workers,
"Trying to set more workers active than there are"); "Trying to set more workers active than there are");
_active_workers = MIN2(v, _total_workers); _active_workers = MIN2(v, _total_workers);
assert(v != 0, "Trying to set active workers to 0"); assert(v != 0, "Trying to set active workers to 0");
_active_workers = MAX2(1, _active_workers); _active_workers = MAX2(1U, _active_workers);
assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers, assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers,
"Unless dynamic should use total workers"); "Unless dynamic should use total workers");
} }
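set_active_workers() clamps the request into [1, _total_workers]; the switch from 1 to 1U matters because MIN2/MAX2 are templates and now need both arguments to be uint. The same clamp as a free function, with std::min/std::max standing in for HotSpot's MIN2/MAX2:

#include <algorithm>
#include <cassert>
#include <cstdio>

typedef unsigned int uint;

uint clamp_active_workers(uint v, uint total_workers) {
  assert(v != 0 && v <= total_workers);
  uint active = std::min(v, total_workers);
  // 1U, not 1: with a plain int literal the template's deduced
  // argument types (int vs uint) would disagree.
  active = std::max(1U, active);
  return active;
}

int main() {
  std::printf("%u\n", clamp_active_workers(3, 8));  // 3: request honored
  std::printf("%u\n", clamp_active_workers(8, 8));  // 8: capped at the gang
}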
@ -370,13 +370,13 @@ class FlexibleWorkGang: public WorkGang {
class WorkGangBarrierSync : public StackObj { class WorkGangBarrierSync : public StackObj {
protected: protected:
Monitor _monitor; Monitor _monitor;
int _n_workers; uint _n_workers;
int _n_completed; uint _n_completed;
bool _should_reset; bool _should_reset;
Monitor* monitor() { return &_monitor; } Monitor* monitor() { return &_monitor; }
int n_workers() { return _n_workers; } uint n_workers() { return _n_workers; }
int n_completed() { return _n_completed; } uint n_completed() { return _n_completed; }
bool should_reset() { return _should_reset; } bool should_reset() { return _should_reset; }
void zero_completed() { _n_completed = 0; } void zero_completed() { _n_completed = 0; }
@ -386,11 +386,11 @@ protected:
public: public:
WorkGangBarrierSync(); WorkGangBarrierSync();
WorkGangBarrierSync(int n_workers, const char* name); WorkGangBarrierSync(uint n_workers, const char* name);
// Set the number of workers that will use the barrier. // Set the number of workers that will use the barrier.
// Must be called before any of the workers start running. // Must be called before any of the workers start running.
void set_n_workers(int n_workers); void set_n_workers(uint n_workers);
// Enter the barrier. A worker that enters the barrier will // Enter the barrier. A worker that enters the barrier will
// not be allowed to leave until all other threads have // not be allowed to leave until all other threads have
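WorkGangBarrierSync is a reusable barrier: each worker that enters bumps _n_completed, the last arrival releases the rest, and _should_reset re-arms the barrier for the next round. A standalone sketch of the same idea in standard C++; note it uses the classic generation-counter formulation rather than the class's _should_reset flag, and the names are illustrative:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class BarrierSyncLike {
  std::mutex _mutex;
  std::condition_variable _cv;
  unsigned _n_workers;
  unsigned _n_completed = 0;
  unsigned long _generation = 0;
 public:
  explicit BarrierSyncLike(unsigned n_workers) : _n_workers(n_workers) {}
  void enter() {
    std::unique_lock<std::mutex> g(_mutex);
    unsigned long gen = _generation;
    if (++_n_completed == _n_workers) {
      ++_generation;            // last arrival: release this round
      _n_completed = 0;         // and re-arm for the next one
      _cv.notify_all();
    } else {
      _cv.wait(g, [&] { return gen != _generation; });
    }
  }
};

int main() {
  BarrierSyncLike barrier(4);
  std::vector<std::thread> gang;
  for (unsigned id = 0; id < 4; ++id) {
    gang.emplace_back([&barrier, id] {
      std::printf("worker %u before barrier\n", id);
      barrier.enter();          // nobody passes until all four arrive
      std::printf("worker %u after barrier\n", id);
    });
  }
  for (auto& t : gang) t.join();
}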
@ -403,17 +403,17 @@ public:
// enumeration type. // enumeration type.
class SubTasksDone : public CHeapObj { class SubTasksDone : public CHeapObj {
jint* _tasks; uint* _tasks;
int _n_tasks; uint _n_tasks;
// _n_threads is used to determine when a sub task is done. // _n_threads is used to determine when a sub task is done.
// It does not control how many threads will execute the subtask // It does not control how many threads will execute the subtask
// but must be initialized to the number that do execute the task // but must be initialized to the number that do execute the task
// in order to correctly decide when the subtask is done (all the // in order to correctly decide when the subtask is done (all the
// threads working on the task have finished). // threads working on the task have finished).
int _n_threads; uint _n_threads;
jint _threads_completed; uint _threads_completed;
#ifdef ASSERT #ifdef ASSERT
volatile jint _claimed; volatile uint _claimed;
#endif #endif
// Set all tasks to unclaimed. // Set all tasks to unclaimed.
@ -423,19 +423,19 @@ public:
// Initializes "this" to a state in which there are "n" tasks to be // Initializes "this" to a state in which there are "n" tasks to be
// processed, none of which are originally claimed. The number of // processed, none of which are originally claimed. The number of
// threads doing the tasks is initialized to 1. // threads doing the tasks is initialized to 1.
SubTasksDone(int n); SubTasksDone(uint n);
// True iff the object is in a valid state. // True iff the object is in a valid state.
bool valid(); bool valid();
// Get/set the number of parallel threads doing the tasks to "t". Can only // Get/set the number of parallel threads doing the tasks to "t". Can only
// be called before tasks start or after they are complete. // be called before tasks start or after they are complete.
int n_threads() { return _n_threads; } uint n_threads() { return _n_threads; }
void set_n_threads(int t); void set_n_threads(uint t);
// Returns "false" if the task "t" is unclaimed, and ensures that task is // Returns "false" if the task "t" is unclaimed, and ensures that task is
// claimed. The task "t" is required to be within the range of "this". // claimed. The task "t" is required to be within the range of "this".
bool is_task_claimed(int t); bool is_task_claimed(uint t);
// The calling thread asserts that it has attempted to claim all the // The calling thread asserts that it has attempted to claim all the
// tasks that it will try to claim. Every thread in the parallel task // tasks that it will try to claim. Every thread in the parallel task
@ -456,12 +456,12 @@ public:
class SequentialSubTasksDone : public StackObj { class SequentialSubTasksDone : public StackObj {
protected: protected:
jint _n_tasks; // Total number of tasks available. uint _n_tasks; // Total number of tasks available.
jint _n_claimed; // Number of tasks claimed. uint _n_claimed; // Number of tasks claimed.
// _n_threads is used to determine when a sub task is done. // _n_threads is used to determine when a sub task is done.
// See comments on SubTasksDone::_n_threads // See comments on SubTasksDone::_n_threads
jint _n_threads; // Total number of parallel threads. uint _n_threads; // Total number of parallel threads.
jint _n_completed; // Number of completed threads. uint _n_completed; // Number of completed threads.
void clear(); void clear();
@ -475,26 +475,26 @@ public:
bool valid(); bool valid();
// number of tasks // number of tasks
jint n_tasks() const { return _n_tasks; } uint n_tasks() const { return _n_tasks; }
// Get/set the number of parallel threads doing the tasks to t. // Get/set the number of parallel threads doing the tasks to t.
// Should be called before the task starts but it is safe // Should be called before the task starts but it is safe
// to call this once a task is running provided that all // to call this once a task is running provided that all
// threads agree on the number of threads. // threads agree on the number of threads.
int n_threads() { return _n_threads; } uint n_threads() { return _n_threads; }
void set_n_threads(int t) { _n_threads = t; } void set_n_threads(uint t) { _n_threads = t; }
// Set the number of tasks to be claimed to t. As above, // Set the number of tasks to be claimed to t. As above,
// should be called before the tasks start but it is safe // should be called before the tasks start but it is safe
// to call this once a task is running provided all threads // to call this once a task is running provided all threads
// agree on the number of tasks. // agree on the number of tasks.
void set_n_tasks(int t) { _n_tasks = t; } void set_n_tasks(uint t) { _n_tasks = t; }
// Returns false if the next task in the sequence is unclaimed, // Returns false if the next task in the sequence is unclaimed,
// and ensures that it is claimed. Will set t to be the index // and ensures that it is claimed. Will set t to be the index
// of the claimed task in the sequence. Will return true if // of the claimed task in the sequence. Will return true if
// the task cannot be claimed and there are none left to claim. // the task cannot be claimed and there are none left to claim.
bool is_task_claimed(int& t); bool is_task_claimed(uint& t);
// The calling thread asserts that it has attempted to claim // The calling thread asserts that it has attempted to claim
// all the tasks it possibly can in the sequence. Every thread // all the tasks it possibly can in the sequence. Every thread
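In use, each worker keeps asking is_task_claimed(t) until it returns true, processing every index it wins along the way, so the chunks are distributed dynamically rather than by a fixed stride. A sketch of that usage with std::atomic (four hypothetical workers summing eight chunk indices; the exactly-once claiming guarantees the total):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned n_tasks = 8;            // cf. set_n_tasks(8)
  std::atomic<unsigned> n_claimed{0};    // cf. _n_claimed
  std::vector<long> sums(4, 0);
  std::vector<std::thread> workers;
  for (unsigned id = 0; id < 4; ++id) {
    workers.emplace_back([&, id] {
      unsigned t = n_claimed.load();
      while (t < n_tasks) {              // the is_task_claimed(t) loop
        if (n_claimed.compare_exchange_strong(t, t + 1)) {
          sums[id] += t;                 // process chunk t exactly once
          t = n_claimed.load();
        }
        // on CAS failure t was reloaded; just retry
      }
    });
  }
  for (auto& w : workers) w.join();
  long total = 0;
  for (long s : sums) total += s;
  std::printf("total = %ld\n", total);   // 0+1+...+7 = 28
}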

View File

@ -33,11 +33,11 @@ class GangWorker;
class WorkData; class WorkData;
YieldingFlexibleWorkGang::YieldingFlexibleWorkGang( YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
const char* name, int workers, bool are_GC_task_threads) : const char* name, uint workers, bool are_GC_task_threads) :
FlexibleWorkGang(name, workers, are_GC_task_threads, false), FlexibleWorkGang(name, workers, are_GC_task_threads, false),
_yielded_workers(0) {} _yielded_workers(0) {}
GangWorker* YieldingFlexibleWorkGang::allocate_worker(int which) { GangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
YieldingFlexibleGangWorker* new_member = YieldingFlexibleGangWorker* new_member =
new YieldingFlexibleGangWorker(this, which); new YieldingFlexibleGangWorker(this, which);
return (YieldingFlexibleGangWorker*) new_member; return (YieldingFlexibleGangWorker*) new_member;
@ -120,7 +120,7 @@ void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
new_task->set_gang(this); // Establish 2-way binding to support yielding new_task->set_gang(this); // Establish 2-way binding to support yielding
_sequence_number++; _sequence_number++;
int requested_size = new_task->requested_size(); uint requested_size = new_task->requested_size();
assert(requested_size >= 0, "Should be non-negative"); assert(new_task->requested_size() >= 0, "Should be non-negative");
if (requested_size != 0) { if (requested_size != 0) {
_active_workers = MIN2(requested_size, total_workers()); _active_workers = MIN2(requested_size, total_workers());
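start_task() sizes the gang per task: a non-zero requested_size is honored but capped at total_workers(). The hunk is cut before the else branch, so the zero-request fallback below (use the whole gang) is an assumption, not a quote of the source:

#include <algorithm>
#include <cstdio>

typedef unsigned int uint;

uint active_workers_for(uint requested_size, uint total_workers) {
  if (requested_size != 0) {
    return std::min(requested_size, total_workers);  // cf. MIN2 above
  }
  return total_workers;   // assumed default when no size is requested
}

int main() {
  std::printf("%u\n", active_workers_for(6, 4));  // 4: capped at the gang
  std::printf("%u\n", active_workers_for(2, 4));  // 2: request honored
  std::printf("%u\n", active_workers_for(0, 4));  // 4: assumed default
}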

View File

@ -71,7 +71,7 @@ public:
// The abstract work method. // The abstract work method.
// The argument tells you which member of the gang you are. // The argument tells you which member of the gang you are.
virtual void work(int i) = 0; virtual void work(uint worker_id) = 0;
int requested_size() const { return _requested_size; } int requested_size() const { return _requested_size; }
int actual_size() const { return _actual_size; } int actual_size() const { return _actual_size; }
@ -128,7 +128,7 @@ protected:
public: public:
// The abstract work method. // The abstract work method.
// The argument tells you which member of the gang you are. // The argument tells you which member of the gang you are.
virtual void work(int i) = 0; virtual void work(uint worker_id) = 0;
// Subclasses should call the parent's yield() method // Subclasses should call the parent's yield() method
// after having done any work specific to the subclass. // after having done any work specific to the subclass.
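The comment above prescribes an ordering for overriders: do the subclass's own bookkeeping first, then delegate to the parent's yield(). A trivial stand-in hierarchy showing that shape (YieldingTaskLike and ConcMarkLikeTask are invented names, not the HotSpot classes):

#include <cstdio>

class YieldingTaskLike {
 public:
  virtual void yield() {
    std::printf("base: park this worker at the yield point\n");
  }
  virtual ~YieldingTaskLike() {}
};

class ConcMarkLikeTask : public YieldingTaskLike {
 public:
  virtual void yield() {
    // Subclass-specific housekeeping first (e.g. flush local buffers)...
    std::printf("subclass: flush per-worker state\n");
    YieldingTaskLike::yield();   // ...then defer to the parent, per the comment
  }
};

int main() {
  ConcMarkLikeTask task;
  task.yield();
}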
@ -159,7 +159,7 @@ class YieldingFlexibleWorkGang: public FlexibleWorkGang {
// Here's the public interface to this class. // Here's the public interface to this class.
public: public:
// Constructor and destructor. // Constructor and destructor.
YieldingFlexibleWorkGang(const char* name, int workers, YieldingFlexibleWorkGang(const char* name, uint workers,
bool are_GC_task_threads); bool are_GC_task_threads);
YieldingFlexibleGangTask* yielding_task() const { YieldingFlexibleGangTask* yielding_task() const {
@ -168,7 +168,7 @@ public:
return (YieldingFlexibleGangTask*)task(); return (YieldingFlexibleGangTask*)task();
} }
// Allocate a worker and return a pointer to it. // Allocate a worker and return a pointer to it.
GangWorker* allocate_worker(int which); GangWorker* allocate_worker(uint which);
// Run a task; returns when the task is done, or the workers yield, // Run a task; returns when the task is done, or the workers yield,
// or the task is aborted, or the work gang is terminated via stop(). // or the task is aborted, or the work gang is terminated via stop().
@ -199,12 +199,12 @@ public:
void abort(); void abort();
private: private:
int _yielded_workers; uint _yielded_workers;
void wait_for_gang(); void wait_for_gang();
public: public:
// Accessors for fields // Accessors for fields
int yielded_workers() const { uint yielded_workers() const {
return _yielded_workers; return _yielded_workers;
} }