commit 30e8dff916
Author: Bengt Rutisson
Date:   2015-10-09 20:45:45 +00:00

6 changed files with 79 additions and 64 deletions


@@ -1654,7 +1654,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   _collectorState = Resetting;
   assert(_restart_addr == NULL,
          "Should have been NULL'd before baton was passed");
-  reset(false /* == !concurrent */);
+  reset_stw();
   _cmsGen->reset_after_compaction();
   _concurrent_cycles_since_last_unload = 0;
@ -1934,7 +1934,7 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
} }
case Resetting: case Resetting:
// CMS heap resizing has been completed // CMS heap resizing has been completed
reset(true); reset_concurrent();
assert(_collectorState == Idling, "Collector state should " assert(_collectorState == Idling, "Collector state should "
"have changed"); "have changed");
@@ -5698,68 +5698,71 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
-void CMSCollector::reset(bool concurrent) {
-  if (concurrent) {
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-
-    // If the state is not "Resetting", the foreground thread
-    // has done a collection and the resetting.
-    if (_collectorState != Resetting) {
-      assert(_collectorState == Idling, "The state should only change"
-             " because the foreground collector has finished the collection");
-      return;
-    }
-
-    // Clear the mark bitmap (no grey objects to start with)
-    // for the next cycle.
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
-    HeapWord* curAddr = _markBitMap.startWord();
-    while (curAddr < _markBitMap.endWord()) {
-      size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
-      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
-      _markBitMap.clear_large_range(chunk);
-      if (ConcurrentMarkSweepThread::should_yield() &&
-          !foregroundGCIsActive() &&
-          CMSYield) {
-        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-               "CMS thread should hold CMS token");
-        assert_lock_strong(bitMapLock());
-        bitMapLock()->unlock();
-        ConcurrentMarkSweepThread::desynchronize(true);
-        stopTimer();
-        if (PrintCMSStatistics != 0) {
-          incrementYields();
-        }
-        // See the comment in coordinator_yield()
-        for (unsigned i = 0; i < CMSYieldSleepCount &&
-                             ConcurrentMarkSweepThread::should_yield() &&
-                             !CMSCollector::foregroundGCIsActive(); ++i) {
-          os::sleep(Thread::current(), 1, false);
-        }
-        ConcurrentMarkSweepThread::synchronize(true);
-        bitMapLock()->lock_without_safepoint_check();
-        startTimer();
-      }
-      curAddr = chunk.end();
-    }
-    // A successful mostly concurrent collection has been done.
-    // Because only the full (i.e., concurrent mode failure) collections
-    // are being measured for gc overhead limits, clean the "near" flag
-    // and count.
-    size_policy()->reset_gc_overhead_limit_count();
-    _collectorState = Idling;
-  } else {
-    // already have the lock
-    assert(_collectorState == Resetting, "just checking");
-    assert_lock_strong(bitMapLock());
-    _markBitMap.clear_all();
-    _collectorState = Idling;
-  }
+void CMSCollector::reset_concurrent() {
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+
+  // If the state is not "Resetting", the foreground thread
+  // has done a collection and the resetting.
+  if (_collectorState != Resetting) {
+    assert(_collectorState == Idling, "The state should only change"
+           " because the foreground collector has finished the collection");
+    return;
+  }
+
+  // Clear the mark bitmap (no grey objects to start with)
+  // for the next cycle.
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+  HeapWord* curAddr = _markBitMap.startWord();
+  while (curAddr < _markBitMap.endWord()) {
+    size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
+    MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+    _markBitMap.clear_large_range(chunk);
+    if (ConcurrentMarkSweepThread::should_yield() &&
+        !foregroundGCIsActive() &&
+        CMSYield) {
+      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+             "CMS thread should hold CMS token");
+      assert_lock_strong(bitMapLock());
+      bitMapLock()->unlock();
+      ConcurrentMarkSweepThread::desynchronize(true);
+      stopTimer();
+      if (PrintCMSStatistics != 0) {
+        incrementYields();
+      }
+      // See the comment in coordinator_yield()
+      for (unsigned i = 0; i < CMSYieldSleepCount &&
+                           ConcurrentMarkSweepThread::should_yield() &&
+                           !CMSCollector::foregroundGCIsActive(); ++i) {
+        os::sleep(Thread::current(), 1, false);
+      }
+      ConcurrentMarkSweepThread::synchronize(true);
+      bitMapLock()->lock_without_safepoint_check();
+      startTimer();
+    }
+    curAddr = chunk.end();
+  }
+  // A successful mostly concurrent collection has been done.
+  // Because only the full (i.e., concurrent mode failure) collections
+  // are being measured for gc overhead limits, clean the "near" flag
+  // and count.
+  size_policy()->reset_gc_overhead_limit_count();
+  _collectorState = Idling;
+  register_gc_end();
+}
+
+// Same as above but for STW paths
+void CMSCollector::reset_stw() {
+  // already have the lock
+  assert(_collectorState == Resetting, "just checking");
+  assert_lock_strong(bitMapLock());
+  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
+  _markBitMap.clear_all();
+  _collectorState = Idling;
   register_gc_end();
 }
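The essence of the split above: the concurrent path clears the mark bitmap in CMSBitMapYieldQuantum-sized chunks so the CMS thread can yield to a foreground collection between chunks, while the STW path clears the whole bitmap in one pass under the lock it already holds. A simplified, self-contained sketch of that shape, using stand-in types and names rather than the HotSpot classes:

// Simplified, self-contained model of the two reset paths (illustrative only;
// MarkBitMapModel and the *_model functions are stand-ins, not HotSpot code).
#include <algorithm>
#include <cstddef>
#include <vector>

struct MarkBitMapModel {
  std::vector<bool> bits;
  explicit MarkBitMapModel(size_t n) : bits(n, true) {}
  void clear_range(size_t begin, size_t end) {
    std::fill(bits.begin() + begin, bits.begin() + end, false);
  }
  void clear_all() { bits.assign(bits.size(), false); }
  size_t size() const { return bits.size(); }
};

// Concurrent path: clear in fixed-size chunks and offer to yield between
// chunks, so a foreground (STW) collection is not held up by the reset.
void reset_concurrent_model(MarkBitMapModel& bm, size_t yield_quantum,
                            bool (*should_yield)(), void (*do_yield)()) {
  size_t cur = 0;
  while (cur < bm.size()) {
    size_t end = std::min(cur + yield_quantum, bm.size());
    bm.clear_range(cur, end);
    if (should_yield()) {
      do_yield();  // in HotSpot: drop bitMapLock, desynchronize, sleep, re-acquire
    }
    cur = end;
  }
}

// STW path: the world is stopped and the lock is already held,
// so the whole bitmap is cleared in one call.
void reset_stw_model(MarkBitMapModel& bm) {
  bm.clear_all();
}

Chunking plus the yield check is what keeps the bitmap lock from being held across the entire clear while a foreground collection is waiting; the STW path has no one to yield to, so it skips all of that machinery.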


@@ -799,8 +799,10 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Concurrent sweeping work
   void sweepWork(ConcurrentMarkSweepGeneration* old_gen);

-  // (Concurrent) resetting of support data structures
-  void reset(bool concurrent);
+  // Concurrent resetting of support data structures
+  void reset_concurrent();
+  // Resetting of support data structures from a STW full GC
+  void reset_stw();

   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();


@@ -109,7 +109,7 @@ void ConcurrentMarkThread::run() {
         break;
       }

-      GCIdMark gc_id_mark;
+      assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
       {
         ResourceMark rm;
         HandleMark hm;


@@ -2612,15 +2612,18 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
 }

 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
+  GCIdMarkAndRestore conc_gc_id_mark;
   collector_state()->set_concurrent_cycle_started(true);
   _gc_timer_cm->register_gc_start(start_time);
   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
   trace_heap_before_gc(_gc_tracer_cm);
+  _cmThread->set_gc_id(GCId::current());
 }

 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (collector_state()->concurrent_cycle_started()) {
+    GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
@@ -2643,6 +2646,7 @@ void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
   // but before the concurrent cycle end has been registered.
   // Make sure that we only send the heap information once.
   if (!_heap_summary_sent) {
+    GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
     trace_heap_after_gc(_gc_tracer_cm);
     _heap_summary_sent = true;
   }


@@ -60,6 +60,11 @@ GCIdMark::~GCIdMark() {
 }

 GCIdMarkAndRestore::GCIdMarkAndRestore() : _gc_id(GCId::create()) {
+  _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
+  currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMarkAndRestore::GCIdMarkAndRestore(uint gc_id) : _gc_id(gc_id) {
   _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
   currentNamedthread()->set_gc_id(_gc_id);
 }


@@ -55,6 +55,7 @@ class GCIdMarkAndRestore : public StackObj {
   uint _previous_gc_id;
  public:
   GCIdMarkAndRestore();
+  GCIdMarkAndRestore(uint gc_id);
   ~GCIdMarkAndRestore();
 };
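Taken together, the gcId changes let a GC id created during the initial-mark pause be stashed on the concurrent thread and re-installed later, which is what the new GCIdMarkAndRestore(uint gc_id) constructor is used for in the register_concurrent_cycle_start/end hunks above. A minimal standalone model of that mark-and-restore pattern, assuming a thread-local current id; illustrative only, not the HotSpot implementation:

// Standalone model of the mark-and-restore idea (illustrative only; the names
// below are stand-ins for GCId/GCIdMarkAndRestore, not the real classes).
#include <cassert>
#include <cstdio>

typedef unsigned int gc_id_t;
static const gc_id_t UNDEFINED_GC_ID = 0xffffffffu;

static thread_local gc_id_t current_gc_id = UNDEFINED_GC_ID;
static gc_id_t next_gc_id = 0;

class GCIdMarkAndRestoreModel {
  gc_id_t _gc_id;
  gc_id_t _previous_gc_id;
 public:
  // Create a fresh id and make it current (analogous to the default constructor).
  GCIdMarkAndRestoreModel() : _gc_id(next_gc_id++), _previous_gc_id(current_gc_id) {
    current_gc_id = _gc_id;
  }
  // Re-install an id created earlier, e.g. one stashed on the concurrent-mark
  // thread at cycle start (analogous to the new GCIdMarkAndRestore(uint gc_id)).
  explicit GCIdMarkAndRestoreModel(gc_id_t gc_id) : _gc_id(gc_id), _previous_gc_id(current_gc_id) {
    current_gc_id = _gc_id;
  }
  // Restore whatever id was current before this scope.
  ~GCIdMarkAndRestoreModel() {
    current_gc_id = _previous_gc_id;
  }
};

int main() {
  gc_id_t cycle_id;
  {
    GCIdMarkAndRestoreModel start_pause;   // initial-mark pause creates the cycle's id
    cycle_id = current_gc_id;              // the concurrent thread would stash this id
  }
  assert(current_gc_id == UNDEFINED_GC_ID);
  {
    GCIdMarkAndRestoreModel cycle_end(cycle_id);  // later phases log under the same id
    std::printf("logging under GC id %u\n", current_gc_id);
  }
  return 0;
}

Because the guard restores the previous id in its destructor, nested scopes compose cleanly and a re-installed id never leaks past the block that needed it.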