8134626: Misc cleanups after generation array removal

Reviewed-by: david, dholmes, tschatzl
Jesper Wilhelmsson 2015-08-18 21:32:21 +02:00
parent c166f75b22
commit 49fb91407d
40 changed files with 653 additions and 702 deletions


@ -2989,7 +2989,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
assert(task_size > CardTableModRefBS::card_size_in_words &&
(task_size % CardTableModRefBS::card_size_in_words == 0),
"Otherwise arithmetic below would be incorrect");
MemRegion span = _gen->reserved();
MemRegion span = _old_gen->reserved();
if (low != NULL) {
if (span.contains(low)) {
// Align low down to a card boundary so that


@ -99,7 +99,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
BlockOffsetArrayNonContigSpace _bt;
CMSCollector* _collector;
ConcurrentMarkSweepGeneration* _gen;
ConcurrentMarkSweepGeneration* _old_gen;
// Data structures for free blocks (used during allocation/sweeping)


@ -212,7 +212,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
use_adaptive_freelists,
dictionaryChoice);
NOT_PRODUCT(debug_cms_space = _cmsSpace;)
_cmsSpace->_gen = this;
_cmsSpace->_old_gen = this;
_gc_stats = new CMSGCStats();
@ -359,7 +359,7 @@ double CMSStats::time_until_cms_gen_full() const {
(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
if (cms_free > expected_promotion) {
// Start a cms collection if there isn't enough space to promote
// for the next minor collection. Use the padded average as
// for the next young collection. Use the padded average as
// a safety factor.
cms_free -= expected_promotion;
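The hunk above starts a CMS cycle when free space in the old gen cannot comfortably cover the padded promotion estimate. A hedged sketch of the surrounding calculation, with simplified names (promotion_rate_bytes_per_sec is an assumption, standing in for the sampled promotion rate; this is not the exact HotSpot code):

#include <cstddef>

// Subtract the padded promotion estimate as a safety factor, then divide
// the remaining free space by the observed promotion rate to estimate the
// seconds left until the old gen fills up.
static double time_until_full(size_t cms_free, size_t expected_promotion,
                              double promotion_rate_bytes_per_sec) {
  if (cms_free > expected_promotion && promotion_rate_bytes_per_sec > 0.0) {
    return (double)(cms_free - expected_promotion) / promotion_rate_bytes_per_sec;
  }
  return 0.0; // Already at risk: a CMS cycle should start now.
}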
@ -592,7 +592,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
// Clip CMSBootstrapOccupancy between 0 and 100.
_bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
_bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
// Now tell CMS generations the identity of their collector
ConcurrentMarkSweepGeneration::set_collector(this);
@ -795,29 +795,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f",
desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f",
maximum_free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000);
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT,
desired_capacity/1000);
gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = gch->young_gen()->capacity();
gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT,
unsafe_max_alloc_nogc()/1000);
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT,
contiguous_available()/1000);
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)",
expand_bytes);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
}
// safe if expansion fails
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity());
}
} else {
size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
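The desired_capacity expression encodes a small derivation: if a fraction f of the generation must stay free, then used == (1 - f) * capacity, so the capacity has to be at least used / (1 - f). Restated in isolation:

#include <cstddef>

// Worked instance of the sizing arithmetic above: 600 MB used with a
// desired free fraction of 0.25 yields 600 / 0.75 = 800 MB of capacity.
static size_t desired_capacity_for(size_t used_bytes, double desired_free_fraction) {
  return (size_t)(used_bytes / (1.0 - desired_free_fraction));
}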
@ -834,11 +827,9 @@ Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
return cmsSpace()->freelistLock();
}
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
bool tlab) {
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
CMSSynchronousYieldRequest yr;
MutexLockerEx x(freelistLock(),
Mutex::_no_safepoint_check_flag);
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
return have_lock_and_allocate(size, tlab);
}
@ -2426,7 +2417,7 @@ void CMSCollector::verify_after_remark_work_1() {
gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -2498,7 +2489,7 @@ void CMSCollector::verify_after_remark_work_2() {
gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -2952,12 +2943,7 @@ void CMSCollector::checkpointRootsInitialWork() {
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
assert(_collectorState == InitialMarking, "just checking");
// If there has not been a GC[n-1] since last GC[n] cycle completed,
// precede our marking with a collection of all
// younger generations to keep floating garbage to a minimum.
// XXX: we won't do this for now -- it's an optimization to be done later.
// already have locks
// Already have locks.
assert_lock_strong(bitMapLock());
assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
@ -3027,7 +3013,7 @@ void CMSCollector::checkpointRootsInitialWork() {
gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens are roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -3037,7 +3023,7 @@ void CMSCollector::checkpointRootsInitialWork() {
}
// Clear mod-union table; it will be dirtied in the prologue of
// CMS generation per each younger generation collection.
// CMS generation per each young generation collection.
assert(_modUnionTable.isAllClear(),
"Was cleared in most recent final checkpoint phase"
@ -3057,7 +3043,7 @@ bool CMSCollector::markFromRoots() {
// assert(!SafepointSynchronize::is_at_safepoint(),
// "inconsistent argument?");
// However that wouldn't be right, because it's possible that
// a safepoint is indeed in progress as a younger generation
// a safepoint is indeed in progress as a young generation
// stop-the-world GC happens even as we mark in this generation.
assert(_collectorState == Marking, "inconsistent state?");
check_correct_thread_executing();
@ -3065,7 +3051,7 @@ bool CMSCollector::markFromRoots() {
// Weak ref discovery note: We may be discovering weak
// refs in this generation concurrent (but interleaved) with
// weak ref discovery by a younger generation collector.
// weak ref discovery by the young generation collector.
CMSTokenSyncWithLocks ts(true, bitMapLock());
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@ -3095,7 +3081,7 @@ bool CMSCollector::markFromRootsWork() {
// Note that when we do a marking step we need to hold the
// bit map lock -- recall that direct allocation (by mutators)
// and promotion (by younger generation collectors) is also
// and promotion (by the young generation collector) is also
// marking the bit map. [the so-called allocate live policy.]
// Because the implementation of bit map marking is not
// robust wrt simultaneous marking of bits in the same word,
@ -4049,7 +4035,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
// one of these methods, please check the other method too.
size_t CMSCollector::preclean_mod_union_table(
ConcurrentMarkSweepGeneration* gen,
ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl) {
verify_work_stacks_empty();
verify_overflow_empty();
@ -4064,10 +4050,10 @@ size_t CMSCollector::preclean_mod_union_table(
// generation, but we might potentially miss cards when the
// generation is rapidly expanding while we are in the midst
// of precleaning.
HeapWord* startAddr = gen->reserved().start();
HeapWord* endAddr = gen->reserved().end();
HeapWord* startAddr = old_gen->reserved().start();
HeapWord* endAddr = old_gen->reserved().end();
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
size_t numDirtyCards, cumNumDirtyCards;
HeapWord *nextAddr, *lastAddr;
@ -4109,7 +4095,7 @@ size_t CMSCollector::preclean_mod_union_table(
HeapWord* stop_point = NULL;
stopTimer();
// Potential yield point
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
bitMapLock());
startTimer();
{
@ -4117,7 +4103,7 @@ size_t CMSCollector::preclean_mod_union_table(
verify_overflow_empty();
sample_eden();
stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
}
if (stop_point != NULL) {
// The careful iteration stopped early either because it found an
@ -4152,15 +4138,15 @@ size_t CMSCollector::preclean_mod_union_table(
// below are largely identical; if you need to modify
// one of these methods, please check the other method too.
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl) {
// strategy: it's similar to preclean_mod_union_table above, in that
// we accumulate contiguous ranges of dirty cards, mark these cards
// precleaned, then scan the region covered by these cards.
HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high());
HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
size_t numDirtyCards, cumNumDirtyCards;
HeapWord *lastAddr, *nextAddr;
@ -4197,13 +4183,13 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
if (!dirtyRegion.is_empty()) {
stopTimer();
CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
startTimer();
sample_eden();
verify_work_stacks_empty();
verify_overflow_empty();
HeapWord* stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
if (stop_point != NULL) {
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
"Should only be AbortablePreclean.");
@ -5086,7 +5072,7 @@ void CMSCollector::do_remark_parallel() {
// preclean phase did of eden, plus the [two] tasks of
// scanning the [two] survivor spaces. Further fine-grain
// parallelization of the scanning of the survivor spaces
// themselves, and of precleaning of the younger gen itself
// themselves, and of precleaning of the young gen itself
// is deferred to the future.
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
@ -5177,7 +5163,7 @@ void CMSCollector::do_remark_non_parallel() {
gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
true, // younger gens as roots
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
@ -5661,7 +5647,7 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generati
}
}
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
// We iterate over the space(s) underlying this generation,
// checking the mark bit map to see if the bits corresponding
// to specific blocks are marked or not. Blocks that are
@ -5690,26 +5676,26 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
// check that we hold the requisite locks
assert(have_cms_token(), "Should hold cms token");
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
assert_lock_strong(gen->freelistLock());
assert_lock_strong(old_gen->freelistLock());
assert_lock_strong(bitMapLock());
assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
_inter_sweep_estimate.padded_average(),
_intra_sweep_estimate.padded_average());
gen->setNearLargestChunk();
old_gen->setNearLargestChunk();
{
SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
// We need to free-up/coalesce garbage/blocks from a
// co-terminal free run. This is done in the SweepClosure
// destructor; so, do not remove this scope, else the
// end-of-sweep-census below will be off by a little bit.
}
gen->cmsSpace()->sweep_completed();
gen->cmsSpace()->endSweepFLCensus(sweep_count());
old_gen->cmsSpace()->sweep_completed();
old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
if (should_unload_classes()) { // unloaded classes this cycle,
_concurrent_cycles_since_last_unload = 0; // ... reset count
} else { // did not unload classes,


@ -723,7 +723,7 @@ class CMSCollector: public CHeapObj<mtGC> {
private:
// Support for parallelizing young gen rescan in CMS remark phase
ParNewGeneration* _young_gen; // the younger gen
ParNewGeneration* _young_gen;
HeapWord** _top_addr; // ... Top of Eden
HeapWord** _end_addr; // ... End of Eden
@ -772,9 +772,9 @@ class CMSCollector: public CHeapObj<mtGC> {
private:
// Concurrent precleaning work
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl);
size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
ScanMarkedObjectsAgainCarefullyClosure* cl);
// Does precleaning work, returning a quantity indicative of
// the amount of "useful work" done.
@ -797,7 +797,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void refProcessingWork();
// Concurrent sweeping work
void sweepWork(ConcurrentMarkSweepGeneration* gen);
void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
// (Concurrent) resetting of support data structures
void reset(bool concurrent);
@ -1120,10 +1120,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
MemRegion used_region_at_save_marks() const;
// Does a "full" (forced) collection invoked on this generation collect
// all younger generations as well? Note that the second conjunct is a
// hack to allow the collection of the younger gen first if the flag is
// set.
virtual bool full_collects_younger_generations() const {
// the young generation as well?
virtual bool full_collects_young_generation() const {
return !ScavengeBeforeFullGC;
}
@ -1153,9 +1151,8 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
// Inform this (non-young) generation that a promotion failure was
// encountered during a collection of a younger generation that
// promotes into this generation.
// Inform this (old) generation that a promotion failure was
// encountered during a collection of the young generation.
virtual void promotion_failure_occurred();
bool should_collect(bool full, size_t size, bool tlab);


@ -295,7 +295,7 @@ inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
}
// If the younger gen collections were skipped, then the
// If the young gen collection was skipped, then the
// number of promoted bytes will be 0 and adding it to the
// average will incorrectly lessen the average. It is, however,
// also possible that no promotion was needed.


@ -39,23 +39,17 @@
// ======= Concurrent Mark Sweep Thread ========
// The CMS thread is created when Concurrent Mark Sweep is used in the
// older of two generations in a generational memory system.
ConcurrentMarkSweepThread*
ConcurrentMarkSweepThread::_cmst = NULL;
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
SurrogateLockerThread*
ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread::SLT_msg_type
ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
Monitor*
ConcurrentMarkSweepThread::_sltMonitor = NULL;
Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;
ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
: ConcurrentGCThread() {


@ -69,20 +69,28 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_to_space(to_space_),
_old_gen(old_gen_),
_young_gen(young_gen_),
_thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)),
_to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
_to_space_closure(young_gen_, this),
_old_gen_closure(young_gen_, this),
_to_space_root_closure(young_gen_, this),
_old_gen_root_closure(young_gen_, this),
_older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
&_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_),
_is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
_is_alive_closure(young_gen_),
_scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0)
_strong_roots_time(0.0),
_term_time(0.0)
{
#if TASKQUEUE_STATS
_term_attempts = 0;
@ -90,8 +98,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
_overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS
_survivor_chunk_array =
(ChunkArray*) old_gen()->get_data_recorder(thread_num());
_survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
_hash_seed = 17; // Might want to take time-based random value.
_start = os::elapsedTime();
_old_gen_closure.set_generation(old_gen_);
@ -154,7 +161,6 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
}
}
void ParScanThreadState::trim_queues(int max_size) {
ObjToScanQueue* queue = work_queue();
do {
@ -222,15 +228,12 @@ void ParScanThreadState::push_on_overflow_stack(oop p) {
}
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// Otherwise, if the object is small enough, try to reallocate the
// buffer.
// If the object is small enough, try to reallocate the buffer.
HeapWord* obj = NULL;
if (!_to_space_full) {
PLAB* const plab = to_space_alloc_buffer();
Space* const sp = to_space();
if (word_sz * 100 <
ParallelGCBufferWastePct * plab->word_sz()) {
if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
// Is small enough; abandon this buffer and start a new one.
plab->retire();
size_t buf_size = plab->word_sz();
@ -241,8 +244,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
size_t free_bytes = sp->free();
while(buf_space == NULL && free_bytes >= min_bytes) {
buf_size = free_bytes >> LogHeapWordSize;
assert(buf_size == (size_t)align_object_size(buf_size),
"Invariant");
assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
buf_space = sp->par_allocate(buf_size);
free_bytes = sp->free();
}
@ -262,7 +264,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// We're used up.
_to_space_full = true;
}
} else {
// Too large; allocate the object individually.
obj = sp->par_allocate(word_sz);
@ -271,7 +272,6 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
return obj;
}
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}
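The refill predicate in alloc_in_to_space_slow() above stays in integer arithmetic: a request counts as "small enough" when it is below ParallelGCBufferWastePct percent of a full PLAB, so retiring the current buffer wastes an acceptable amount. The predicate in isolation (an illustrative helper, not a HotSpot declaration):

#include <cstddef>

// Equivalent to word_sz < plab_words * (waste_pct / 100.0), with both
// sides scaled by 100 so no floating point is needed.
static bool small_enough_to_refill(size_t word_sz, size_t plab_words, size_t waste_pct) {
  return word_sz * 100 < waste_pct * plab_words;
}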
@ -288,7 +288,7 @@ public:
// Initializes states for the specified number of threads;
ParScanThreadStateSet(int num_threads,
Space& to_space,
ParNewGeneration& gen,
ParNewGeneration& young_gen,
Generation& old_gen,
ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks_,
@ -315,21 +315,25 @@ public:
private:
ParallelTaskTerminator& _term;
ParNewGeneration& _gen;
ParNewGeneration& _young_gen;
Generation& _old_gen;
public:
bool is_valid(int id) const { return id < length(); }
ParallelTaskTerminator* terminator() { return &_term; }
};
ParScanThreadStateSet::ParScanThreadStateSet(
int num_threads, Space& to_space, ParNewGeneration& gen,
Generation& old_gen, ObjToScanQueueSet& queue_set,
ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
Space& to_space,
ParNewGeneration& young_gen,
Generation& old_gen,
ObjToScanQueueSet& queue_set,
Stack<oop, mtGC>* overflow_stacks,
size_t desired_plab_sz, ParallelTaskTerminator& term)
size_t desired_plab_sz,
ParallelTaskTerminator& term)
: ResourceArray(sizeof(ParScanThreadState), num_threads),
_gen(gen), _old_gen(old_gen), _term(term)
_young_gen(young_gen),
_old_gen(old_gen),
_term(term)
{
assert(num_threads > 0, "sanity check!");
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@ -337,13 +341,12 @@ ParScanThreadStateSet::ParScanThreadStateSet(
// Initialize states.
for (int i = 0; i < num_threads; ++i) {
new ((ParScanThreadState*)_data + i)
ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
overflow_stacks, desired_plab_sz, term);
}
}
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
assert(i >= 0 && i < length(), "sanity check!");
return ((ParScanThreadState*)_data)[i];
}
@ -357,8 +360,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace
}
}
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
{
void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
_term.reset_for_reuse(active_threads);
if (promotion_failed) {
for (int i = 0; i < length(); ++i) {
@ -368,36 +370,27 @@ void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
}
#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
void ParScanThreadState::reset_stats() {
taskqueue_stats().reset();
_term_attempts = 0;
_overflow_refills = 0;
_overflow_refill_objs = 0;
}
void ParScanThreadStateSet::reset_stats()
{
void ParScanThreadStateSet::reset_stats() {
for (int i = 0; i < length(); ++i) {
thread_state(i).reset_stats();
}
}
void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Termination Stats");
st->print_raw_cr(" elapsed --strong roots-- "
"-------termination-------");
st->print_raw_cr("thr ms ms % "
" ms % attempts");
st->print_raw_cr("--- --------- --------- ------ "
"--------- ------ --------");
st->print_raw_cr(" elapsed --strong roots-- -------termination-------");
st->print_raw_cr("thr ms ms % ms % attempts");
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}
void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
print_termination_stats_hdr(st);
for (int i = 0; i < length(); ++i) {
@ -405,23 +398,20 @@ void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
const double elapsed_ms = pss.elapsed_time() * 1000.0;
const double s_roots_ms = pss.strong_roots_time() * 1000.0;
const double term_ms = pss.term_time() * 1000.0;
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8),
st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
}
}
// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Task Stats");
st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
print_taskqueue_stats_hdr(st);
TaskQueueStats totals;
@ -443,8 +433,7 @@ void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
}
#endif // TASKQUEUE_STATS
void ParScanThreadStateSet::flush()
{
void ParScanThreadStateSet::flush() {
// Work in this loop should be kept as lightweight as
// possible since this might otherwise become a bottleneck
// to scaling. Should we add heavy-weight work into this
@ -454,12 +443,12 @@ void ParScanThreadStateSet::flush()
// Flush stats related to To-space PLAB activity and
// retire the last buffer.
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
// Every thread has its own age table. We need to merge
// them all into one.
ageTable *local_table = par_scan_state.age_table();
_gen.age_table()->merge(local_table);
_young_gen.age_table()->merge(local_table);
// Inform old gen that we're done.
_old_gen.par_promote_alloc_done(i);
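As the comment above notes, every worker keeps a private age table that flush() merges into the generation-wide one. A simplified sketch of that merge, using an invented AgeHistogram type rather than HotSpot's ageTable:

#include <cstddef>

// Each slot counts bytes of surviving objects of a given age; the merge
// is a plain element-wise sum, done after the parallel phase completes.
struct AgeHistogram {
  static const int table_size = 16;
  size_t sizes[table_size];

  void merge(const AgeHistogram* local) {
    for (int i = 0; i < table_size; i++) {
      sizes[i] += local->sizes[i];
    }
  }
};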
@ -478,8 +467,7 @@ void ParScanThreadStateSet::flush()
ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
_boundary = _g->reserved().end();
}
@ -531,24 +519,23 @@ void ParEvacuateFollowersClosure::do_void() {
ObjToScanQueue* work_q = par_scan_state()->work_queue();
while (true) {
// Scan to-space and old-gen objs until we run out of both.
oop obj_to_scan;
par_scan_state()->trim_queues(0);
// We have no local work, attempt to steal from other threads.
// attempt to steal work from promoted.
// Attempt to steal work from promoted.
if (task_queues()->steal(par_scan_state()->thread_num(),
par_scan_state()->hash_seed(),
obj_to_scan)) {
bool res = work_q->push(obj_to_scan);
assert(res, "Empty queue should have room for a push.");
// if successful, goto Start.
// If successful, goto Start.
continue;
// try global overflow list.
// Try global overflow list.
} else if (par_gen()->take_from_overflow_list(par_scan_state())) {
continue;
}
@ -564,8 +551,10 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time();
}
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"),
_young_gen(young_gen), _old_gen(old_gen),
@ -595,8 +584,7 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
true, // Process younger gens, if any, as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
@ -613,8 +601,7 @@ void ParNewGenTask::work(uint worker_id) {
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL),
_is_alive_closure(this),
@ -625,20 +612,19 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
_task_queues = new ObjToScanQueueSet(ParallelGCThreads);
guarantee(_task_queues != NULL, "task_queues allocation failure.");
for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
for (uint i = 0; i < ParallelGCThreads; i++) {
ObjToScanQueue *q = new ObjToScanQueue();
guarantee(q != NULL, "work_queue Allocation failure.");
_task_queues->register_queue(i1, q);
_task_queues->register_queue(i, q);
}
for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
_task_queues->queue(i2)->initialize();
for (uint i = 0; i < ParallelGCThreads; i++) {
_task_queues->queue(i)->initialize();
}
_overflow_stacks = NULL;
if (ParGCUseLocalOverflow) {
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
// with ','
// typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
typedef Stack<oop, mtGC> GCOopStack;
_overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@ -742,7 +728,7 @@ class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen,
ParNewGeneration& young_gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set);
@ -768,11 +754,9 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
_old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set)
{
}
{ }
void ParNewRefProcTaskProxy::work(uint worker_id)
{
void ParNewRefProcTaskProxy::work(uint worker_id) {
ResourceMark rm;
HandleMark hm;
ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@ -792,15 +776,12 @@ public:
_task(task)
{ }
virtual void work(uint worker_id)
{
virtual void work(uint worker_id) {
_task.work(worker_id);
}
};
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
@ -812,8 +793,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
_young_gen.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
@ -821,8 +801,7 @@ void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
workers->run_task(&enq_task);
}
void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
_state_set.flush();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->save_marks();
@ -830,7 +809,8 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()
ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
ScanClosure(g, gc_barrier) {}
ScanClosure(g, gc_barrier)
{ }
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@ -850,7 +830,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
} while (!_gch->no_allocs_since_save_marks());
}
// A Generation that does parallel young-gen collection.
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@ -996,9 +975,9 @@ void ParNewGeneration::collect(bool full,
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// collection(s). When a young collection fails to collect
// sufficient space resizing of the young generation can occur
// an redistribute the spaces in the young generation. Mangle
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
@ -1113,8 +1092,10 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.
oop ParNewGeneration::copy_to_survivor_space(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
@ -1141,8 +1122,7 @@ oop ParNewGeneration::copy_to_survivor_space(
}
if (new_obj == NULL) {
// Either to-space is full or we decided to promote
// try allocating obj tenured
// Either to-space is full or we decided to promote; try allocating obj tenured
// Attempt to install a null forwarding pointer (atomically),
// to claim the right to install the real forwarding pointer.
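The last two comment lines describe a claim protocol: workers race to install a marker in the forwarding slot, and only the winner performs the copy. A hedged reduction using std::atomic (the sentinel and helper names are invented; the real code works through the markOop):

#include <atomic>

// Illustrative claim protocol: one worker CASes a sentinel into the slot,
// copies the object, then publishes the real forwardee; losers spin until
// the sentinel is replaced and use the winner's copy.
static void* const CLAIMED = (void*)1;

static void* claim_and_forward(std::atomic<void*>* slot, void* (*copy_object)()) {
  void* expected = nullptr;
  if (slot->compare_exchange_strong(expected, CLAIMED)) {
    void* forwardee = copy_object();  // Winner does the copy,
    slot->store(forwardee);           // then publishes the real pointer.
    return forwardee;
  }
  void* v;
  while ((v = slot->load()) == CLAIMED) { /* wait for the winner */ }
  return v;
}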


@ -71,11 +71,7 @@ class ParScanThreadState {
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
// One of these two will be passed to process_roots, which will
// set its generation. The first is for two-gen configs where the
// old gen collects the perm gen; the second is for arbitrary configs.
// The second isn't used right now (it used to be used for the train, an
// incremental collector) but the declaration has been left as a reminder.
// Will be passed to process_roots to set its generation.
ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
// This closure will always be bound to the old gen; it will be used
// in evacuate_followers.
@ -85,7 +81,6 @@ class ParScanThreadState {
ParScanWeakRefClosure _scan_weak_ref_closure;
ParKeepAliveClosure _keep_alive_closure;
Space* _to_space;
Space* to_space() { return _to_space; }


@ -35,7 +35,7 @@ private:
// We encode the value of the heap region type so the generation can be
// determined quickly. The tag is split into two parts:
//
// major type (young, humongous) : top N-1 bits
// major type (young, old, humongous, archive) : top N-1 bits
// minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
//
// If there's need to increase the number of minor types in the
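A hedged illustration of the split tag described above (the constants are invented for the example; the real HeapRegionType values differ):

// The bottom bit selects the minor type, the remaining top bits select
// the major type, so "which generation?" is a shift and compare.
enum {
  MinorBit   = 1,
  MajorShift = 1,
  YoungMajor = 1, OldMajor = 2, HumongousMajor = 3, ArchiveMajor = 4
};

inline int  make_tag(int major, int minor) { return (major << MajorShift) | minor; }
inline int  major_of(int tag)              { return tag >> MajorShift; }
inline bool minor_of(int tag)              { return (tag & MinorBit) != 0; }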


@ -30,26 +30,22 @@
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.hpp"
inline size_t ParallelScavengeHeap::total_invocations()
{
inline size_t ParallelScavengeHeap::total_invocations() {
return UseParallelOldGC ? PSParallelCompact::total_invocations() :
PSMarkSweep::total_invocations();
}
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
{
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
return size < eden_size / 2;
}
inline void ParallelScavengeHeap::invoke_scavenge()
{
inline void ParallelScavengeHeap::invoke_scavenge() {
PSScavenge::invoke();
}
inline bool ParallelScavengeHeap::is_in_young(oop p) {
// Assumes the old gen address range is lower than that of the young gen.
const void* loc = (void*) p;
bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
assert(result == young_gen()->is_in_reserved(p),
err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
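Given the layout assumption stated in the comment (old gen below young gen), membership in the young gen needs only a lower-bound compare for pointers already known to be in the heap. A freestanding illustration:

#include <cstdint>

// With the reserved layout [ old gen | young gen ), any in-heap address
// at or above the young gen's start lies in the young gen, so the upper
// bound never needs to be checked.
static inline bool is_in_young(uintptr_t addr, uintptr_t young_start) {
  return addr >= young_start;
}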


@ -486,12 +486,12 @@ void PSOldGen::verify() {
object_space()->verify();
}
class VerifyObjectStartArrayClosure : public ObjectClosure {
PSOldGen* _gen;
PSOldGen* _old_gen;
ObjectStartArray* _start_array;
public:
VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
_gen(gen), _start_array(start_array) { }
VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
_old_gen(old_gen), _start_array(start_array) { }
virtual void do_object(oop obj) {
HeapWord* test_addr = (HeapWord*)obj + 1;


@ -958,7 +958,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
// Update the from & to space pointers in space_info, since they are swapped
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of minor
// promotion failure does not swap spaces) because an unknown number of young
// collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


@ -979,7 +979,6 @@ class PSParallelCompact : AllStatic {
static bool _dwl_initialized;
#endif // #ifdef ASSERT
public:
static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }


@ -597,9 +597,9 @@ bool PSScavenge::invoke_no_policy() {
// to allow resizes that may have been inhibited by the
// relative location of the "to" and "from" spaces.
// Resizing the old gen at minor collects can cause increases
// Resizing the old gen at young collections can cause increases
// that don't feed back to the generation sizing policy until
// a major collection. Don't resize the old gen here.
// a full collection. Don't resize the old gen here.
heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
size_policy->calculated_survivor_size_in_bytes());


@ -172,10 +172,10 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
// There are not old-to-young pointers if the old gen is empty.
assert(!_gen->object_space()->is_empty(),
assert(!_old_gen->object_space()->is_empty(),
"Should not be called is there is no work");
assert(_gen != NULL, "Sanity");
assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
assert(_old_gen != NULL, "Sanity");
assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
assert(_stripe_number < ParallelGCThreads, "Sanity");
{
@ -183,8 +183,8 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
CardTableExtension* card_table =
barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
card_table->scavenge_contents_parallel(_gen->start_array(),
_gen->object_space(),
card_table->scavenge_contents_parallel(_old_gen->start_array(),
_old_gen->object_space(),
_gen_top,
pm,
_stripe_number,


@ -160,17 +160,17 @@ class StealTask : public GCTask {
class OldToYoungRootsTask : public GCTask {
private:
PSOldGen* _gen;
PSOldGen* _old_gen;
HeapWord* _gen_top;
uint _stripe_number;
uint _stripe_total;
public:
OldToYoungRootsTask(PSOldGen *gen,
OldToYoungRootsTask(PSOldGen *old_gen,
HeapWord* gen_top,
uint stripe_number,
uint stripe_total) :
_gen(gen),
_old_gen(old_gen),
_gen_top(gen_top),
_stripe_number(stripe_number),
_stripe_total(stripe_total) { }


@ -106,14 +106,14 @@ FastEvacuateFollowersClosure(GenCollectedHeap* gch,
_gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
_gen = (DefNewGeneration*)_gch->young_gen();
_young_gen = (DefNewGeneration*)_gch->young_gen();
}
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
do {
_gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
} while (!_gch->no_allocs_since_save_marks());
guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@ -200,8 +200,9 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();
if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
vm_exit_during_initialization("Could not allocate a new gen space");
}
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
@ -655,7 +656,7 @@ void DefNewGeneration::collect(bool full,
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// collection(s). When a young collection fails to collect
// sufficient space resizing of the young generation can occur
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to


@ -193,7 +193,7 @@ protected:
class FastEvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch;
DefNewGeneration* _gen;
DefNewGeneration* _young_gen;
FastScanClosure* _scan_cur_or_nonheap;
FastScanClosure* _scan_older;
public:


@ -57,8 +57,8 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
// each generation, allowing them in turn to examine the modified
// field.
//
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// We could check that p is also in the old generation, but
// dirty cards in the young gen are never scanned, so the
// extra check probably isn't worthwhile.
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);


@ -108,7 +108,7 @@ bool TenuredGeneration::should_collect(bool full,
free());
}
}
// If we had to expand to accommodate promotions from younger generations
// If we had to expand to accommodate promotions from the young generation
if (!result && _capacity_at_prologue < capacity()) {
result = true;
if (PrintGC && Verbose) {
@ -140,11 +140,11 @@ void TenuredGeneration::update_gc_stats(Generation* current_generation,
// that are of interest at this point.
bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Calculate size of data promoted from the younger generations
// Calculate size of data promoted from the young generation
// before doing the collection.
size_t used_before_gc = used();
// If the younger gen collections were skipped, then the
// If the young gen collection was skipped, then the
// number of promoted bytes will be 0 and adding it to the
// average will incorrectly lessen the average. It is, however,
// also possible that no promotion was needed.


@ -54,6 +54,7 @@ class TenuredGeneration: public CardGeneration {
ContiguousSpace* space() const { return _the_space; }
void assert_correct_size_change_locking();
public:
TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
@ -66,10 +67,9 @@ class TenuredGeneration: public CardGeneration {
const char* short_name() const { return "Tenured"; }
// Does a "full" (forced) collection invoked on this generation collect
// all younger generations as well? Note that this is a
// hack to allow the collection of the younger gen first if the flag is
// set.
virtual bool full_collects_younger_generations() const {
// the young generation as well? Note that this is a hack to allow the
// collection of the young gen first if the flag is set.
virtual bool full_collects_young_generation() const {
return !ScavengeBeforeFullGC;
}
@ -99,15 +99,16 @@ class TenuredGeneration: public CardGeneration {
bool clear_all_soft_refs,
size_t size,
bool is_tlab);
HeapWord* expand_and_allocate(size_t size,
bool is_tlab,
bool parallel = false);
virtual void prepare_for_verify();
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
bool should_collect(bool full,
size_t word_size,
bool is_tlab);


@ -266,7 +266,7 @@ void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
}
// The policy does not have enough data until at least some
// minor collections have been done.
// young collections have been done.
_young_gen_policy_is_ready =
(_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
@ -295,8 +295,7 @@ void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
_minor_timer.start();
}
size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
uint percent_change) {
size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
size_t eden_heap_delta;
eden_heap_delta = cur_eden / 100 * percent_change;
return eden_heap_delta;
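The operand order in eden_heap_delta is worth noting: dividing before multiplying keeps the intermediate value small, so the expression cannot overflow size_t even for very large sizes, at the cost of truncating cur_eden to 1% granularity. In isolation:

#include <cstddef>

// cur / 100 * pct cannot overflow for pct <= 100, whereas
// cur * pct / 100 could overflow size_t for very large cur.
static size_t percent_of(size_t cur, unsigned pct) {
  return cur / 100 * pct;
}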
@ -312,8 +311,7 @@ size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
return eden_heap_delta;
}
size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
uint percent_change) {
size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
size_t promo_heap_delta;
promo_heap_delta = cur_promo / 100 * percent_change;
return promo_heap_delta;


@ -80,7 +80,9 @@ jbyte CardTableRS::find_unused_youngergenP_card_value() {
break;
}
}
if (!seen) return v;
if (!seen) {
return v;
}
}
ShouldNotReachHere();
return 0;
@ -502,7 +504,7 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
//
// The main point below is that the parallel card scanning code
// deals correctly with these stale card values. There are two main
// cases to consider where we have a stale "younger gen" value and a
// cases to consider where we have a stale "young gen" value and a
// "derivative" case to consider, where we have a stale
// "cur_younger_gen_and_prev_non_clean" value, as will become
// apparent in the case analysis below.


@ -160,16 +160,20 @@ void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
// Memory state functions.
CollectedHeap::CollectedHeap() {
CollectedHeap::CollectedHeap() :
_barrier_set(NULL),
_is_gc_active(false),
_total_collections(0),
_total_full_collections(0),
_gc_cause(GCCause::_no_gc),
_gc_lastcause(GCCause::_no_gc),
_defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
const size_t elements_per_word = HeapWordSize / sizeof(jint);
_filler_array_max_size = align_object_size(filler_array_hdr_size() +
max_len / elements_per_word);
_barrier_set = NULL;
_is_gc_active = false;
_total_collections = _total_full_collections = 0;
_gc_cause = _gc_lastcause = GCCause::_no_gc;
NOT_PRODUCT(_promotion_failure_alot_count = 0;)
NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
@ -184,7 +188,7 @@ CollectedHeap::CollectedHeap() {
PerfDataManager::create_string_variable(SUN_GC, "lastCause",
80, GCCause::to_string(_gc_lastcause), CHECK);
}
_defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
// Create the ring log
if (LogEvents) {
_gc_heap_log = new GCHeapLog();
@ -570,8 +574,8 @@ void CollectedHeap::resize_all_tlabs() {
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
if (HeapDumpBeforeFullGC) {
GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
// We are doing a "major" collection and a heap dump before
// major collection has been requested.
// We are doing a full collection and a heap dump before
// full collection has been requested.
HeapDumper::dump_heap();
}
if (PrintClassHistogramBeforeFullGC) {


@ -464,7 +464,7 @@ void GenCollectedHeap::do_collection(bool full,
bool prepared_for_verification = false;
bool collected_old = false;
bool old_collects_young = complete &&
_old_gen->full_collects_younger_generations();
_old_gen->full_collects_young_generation();
if (!old_collects_young &&
_young_gen->should_collect(full, size, is_tlab)) {
if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
@ -521,7 +521,7 @@ void GenCollectedHeap::do_collection(bool full,
// a whole heap collection.
complete = complete || collected_old;
if (complete) { // We did a "major" collection
if (complete) { // We did a full collection
// FIXME: See comment at pre_full_gc_dump call
post_full_gc_dump(NULL); // do any post full gc dumps
}
@ -668,13 +668,13 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
GenerationType type,
bool younger_gens_as_roots,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure) {
const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
bool is_moving_collection = false;
if (type == YoungGen || is_adjust_phase) {
@ -691,7 +691,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
cld_closure, weak_cld_closure,
&mark_code_closure);
if (younger_gens_as_roots) {
if (young_gen_as_roots) {
if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
if (type == OldGen) {
not_older_gens->set_generation(_young_gen);
@ -763,25 +763,25 @@ HeapWord** GenCollectedHeap::end_addr() const {
void GenCollectedHeap::collect(GCCause::Cause cause) {
if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
// mostly concurrent full collection
// Mostly concurrent full collection.
collect_mostly_concurrent(cause);
#else // INCLUDE_ALL_GCS
ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API
// Young collection for the WhiteBox API.
collect(cause, YoungGen);
} else {
#ifdef ASSERT
if (cause == GCCause::_scavenge_alot) {
// minor collection only
// Young collection only.
collect(cause, YoungGen);
} else {
// Stop-the-world full collection
// Stop-the-world full collection.
collect(cause, OldGen);
}
#else
// Stop-the-world full collection
// Stop-the-world full collection.
collect(cause, OldGen);
#endif
}


@ -173,8 +173,7 @@ public:
size_t max_capacity() const;
HeapWord* mem_allocate(size_t size,
bool* gc_overhead_limit_was_exceeded);
HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
// We may support a shared contiguous allocation area, if the youngest
// generation does.
@ -403,7 +402,7 @@ public:
void gen_process_roots(StrongRootsScope* scope,
GenerationType type,
bool younger_gens_as_roots,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,


@ -110,13 +110,11 @@ public:
virtual void print() {}
// Informs the RS that the given memregion contains no references to
// younger generations.
// the young generation.
virtual void clear(MemRegion mr) = 0;
// Informs the RS that there are no references to generations
// younger than gen from generations gen and older.
// The parameter clear_perm indicates if the perm_gen's
// remembered set should also be processed/cleared.
// Informs the RS that there are no references to the young generation
// from old_gen.
virtual void clear_into_younger(Generation* old_gen) = 0;
// Informs the RS that refs in the given "mr" may have changed


@ -80,7 +80,6 @@ struct ScratchBlock {
// first two fields are word-sized.)
};
class Generation: public CHeapObj<mtGC> {
friend class VMStructs;
private:
@ -299,8 +298,7 @@ class Generation: public CHeapObj<mtGC> {
// word of "obj" may have been overwritten with a forwarding pointer, and
// also taking care to copy the klass pointer *last*. Returns the new
// object if successful, or else NULL.
virtual oop par_promote(int thread_num,
oop obj, markOop m, size_t word_sz);
virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
// Informs the current generation that all par_promote_alloc's in the
// collection have been completed; any supporting data structures can be
@ -315,7 +313,7 @@ class Generation: public CHeapObj<mtGC> {
// This generation will collect all younger generations
// during a full collection.
virtual bool full_collects_younger_generations() const { return false; }
virtual bool full_collects_young_generation() const { return false; }
// This generation does in-place marking, meaning that mark words
// are mutated during the marking phase and presumably reinitialized
@ -370,18 +368,18 @@ class Generation: public CHeapObj<mtGC> {
// Some generations may require some cleanup or preparation actions before
// allowing a collection. The default is to do nothing.
virtual void gc_prologue(bool full) {};
virtual void gc_prologue(bool full) {}
// Some generations may require some cleanup actions after a collection.
// The default is to do nothing.
virtual void gc_epilogue(bool full) {};
virtual void gc_epilogue(bool full) {}
// Save the high water marks for the used space in a generation.
virtual void record_spaces_top() {};
virtual void record_spaces_top() {}
// Some generations may need to be "fixed-up" after some allocation
// activity to make them parsable again. The default is to do nothing.
virtual void ensure_parsability() {};
virtual void ensure_parsability() {}
// Time (in ms) when we were last collected or now if a collection is
// in progress.
@ -432,7 +430,7 @@ class Generation: public CHeapObj<mtGC> {
// Some generations may require some cleanup actions before allowing
// a verification.
virtual void prepare_for_verify() {};
virtual void prepare_for_verify() {}
// Accessing "marks".
@ -483,7 +481,7 @@ class Generation: public CHeapObj<mtGC> {
// Give each generation an opportunity to do clean up for any
// contributed scratch.
virtual void reset_scratch() {};
virtual void reset_scratch() {}
// When an older generation has been collected, and perhaps resized,
// this method will be invoked on all younger generations (from older to


@ -1065,7 +1065,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// can mark through them now, rather than delaying that
// to the reference-processing phase. Since all current
// time-stamp policies advance the soft-ref clock only
// at a major collection cycle, this is always currently
// at a full collection cycle, this is always currently
// accurate.
if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
return false;


@ -213,15 +213,18 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
size_t _size; // size of object to be allocated
Metaspace::MetadataType _mdtype;
ClassLoaderData* _loader_data;
public:
VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
size_t size, Metaspace::MetadataType mdtype,
size_t size,
Metaspace::MetadataType mdtype,
uint gc_count_before,
uint full_gc_count_before,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
_loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
}
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }


@ -1596,7 +1596,7 @@ public:
"(ParallelGC only)") \
\
product(bool, ScavengeBeforeFullGC, true, \
"Scavenge youngest generation before each full GC.") \
"Scavenge young generation before each full GC.") \
\
develop(bool, ScavengeWithObjectsInToSpace, false, \
"Allow scavenges to occur when to-space contains objects") \
@ -2094,7 +2094,7 @@ public:
"promotion failure") \
\
notproduct(bool, PromotionFailureALot, false, \
"Use promotion failure handling on every youngest generation " \
"Use promotion failure handling on every young generation " \
"collection") \
\
develop(uintx, PromotionFailureALotCount, 1000, \
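
Both flag descriptions above now say "young generation". As a usage example: ScavengeBeforeFullGC is a product flag that defaults to true, so -XX:-ScavengeBeforeFullGC turns off the young collection that normally precedes a full GC, while PromotionFailureALot is a notproduct flag, available only in non-product builds, where it injects promotion failures (paced by the develop flag PromotionFailureALotCount) to exercise the recovery paths.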

View File

@@ -1360,7 +1360,7 @@ ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
}
// We've successfully installed INFLATING (0) into the mark-word.
// This is the only case where 0 will appear in a mark-work.
// This is the only case where 0 will appear in a mark-word.
// Only the singular thread that successfully swings the mark-word
// to 0 can perform (or more precisely, complete) inflation.
//
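
The typo fix above ("mark-work" to "mark-word") sits in the comment describing the inflation handshake: zero is never a legal mark-word value, so the single thread whose compare-and-swap installs 0 (INFLATING) wins the right to complete the inflation. A standalone sketch of that handshake with hypothetical types, not the HotSpot sources:

#include <atomic>
#include <cstdint>

// Standalone sketch of the INFLATING handshake described above.
// 0 never occurs as a valid mark word, so installing it announces
// "inflation in progress" to every other thread.
static const uintptr_t INFLATING = 0;

struct ObjectHeader {
  std::atomic<uintptr_t> mark;
};

// Returns true only for the single thread whose CAS swings the mark
// word to 0; that thread must then finish the inflation and publish
// the monitor pointer with a later store.
bool try_begin_inflation(ObjectHeader* h, uintptr_t observed) {
  return h->mark.compare_exchange_strong(observed, INFLATING);
}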

View File

@@ -204,21 +204,21 @@ MemoryUsage ContiguousSpacePool::get_memory_usage() {
return MemoryUsage(initial_size(), used, committed, maxSize);
}
SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen,
SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->from()->capacity(), max_size,
support_usage_threshold), _gen(gen) {
CollectedMemoryPool(name, type, young_gen->from()->capacity(), max_size,
support_usage_threshold), _young_gen(young_gen) {
}
size_t SurvivorContiguousSpacePool::used_in_bytes() {
return _gen->from()->used();
return _young_gen->from()->used();
}
size_t SurvivorContiguousSpacePool::committed_in_bytes() {
return _gen->from()->capacity();
return _young_gen->from()->capacity();
}
MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,10 +163,10 @@
class SurvivorContiguousSpacePool : public CollectedMemoryPool {
private:
DefNewGeneration* _gen;
DefNewGeneration* _young_gen;
public:
SurvivorContiguousSpacePool(DefNewGeneration* gen,
SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
const char* name,
PoolType type,
size_t max_size,

View File

@@ -212,13 +212,13 @@ MemoryPool* MemoryService::add_space(ContiguousSpace* space,
return (MemoryPool*) pool;
}
MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* young_gen,
const char* name,
bool is_heap,
size_t max_size,
bool support_usage_threshold) {
MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold);
SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(young_gen, name, type, max_size, support_usage_threshold);
_pools_list->append(pool);
return (MemoryPool*) pool;
@@ -328,18 +328,18 @@ void MemoryService::add_generation_memory_pool(Generation* gen,
#if INCLUDE_ALL_GCS
void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
void MemoryService::add_psYoung_memory_pool(PSYoungGen* young_gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
// Add a memory pool for each space and young gen doesn't
// support low memory detection as it is expected to get filled up.
EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
gen->eden_space(),
EdenMutableSpacePool* eden = new EdenMutableSpacePool(young_gen,
young_gen->eden_space(),
"PS Eden Space",
MemoryPool::Heap,
false /* support_usage_threshold */);
SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(young_gen,
"PS Survivor Space",
MemoryPool::Heap,
false /* support_usage_threshold */);
@@ -352,13 +352,13 @@ void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* majo
_pools_list->append(survivor);
}
void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) {
PSGenerationPool* old_gen = new PSGenerationPool(gen,
void MemoryService::add_psOld_memory_pool(PSOldGen* old_gen, MemoryManager* mgr) {
PSGenerationPool* old_gen_pool = new PSGenerationPool(old_gen,
"PS Old Gen",
MemoryPool::Heap,
true /* support_usage_threshold */);
mgr->add_pool(old_gen);
_pools_list->append(old_gen);
mgr->add_pool(old_gen_pool);
_pools_list->append(old_gen_pool);
}
void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
@@ -548,7 +548,7 @@ Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
}
//
// GC manager type depends on the type of Generation. Depending on the space
// availablity and vm options the gc uses major gc manager or minor gc
// availability and vm options the gc uses major gc manager or minor gc
// manager or both. The type of gc manager depends on the generation kind.
// For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
// _fullGC is set to false ) and for other generation kinds doing
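
Besides the variable rename (the local pool is now old_gen_pool so it no longer collides with the renamed old_gen parameter), this file's final hunk fixes "availablity" in the comment that explains manager routing: young-generation scavenges are charged to the minor GC manager, full collections to the major one. Reduced to a sketch, with a hypothetical helper rather than the actual MemoryService logic:

// Hypothetical helper expressing the routing rule described above;
// the real bookkeeping is spread through MemoryService's gc_begin
// and gc_end paths.
GCMemoryManager* manager_for(bool full_gc,
                             GCMemoryManager* minor_mgr,
                             GCMemoryManager* major_mgr) {
  // Scavenges (full_gc == false) count against the minor manager,
  // full collections against the major one.
  return full_gc ? major_mgr : minor_mgr;
}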

View File

@@ -80,10 +80,10 @@ private:
}
static void add_psYoung_memory_pool(PSYoungGen* gen,
static void add_psYoung_memory_pool(PSYoungGen* young_gen,
MemoryManager* major_mgr,
MemoryManager* minor_mgr);
static void add_psOld_memory_pool(PSOldGen* gen,
static void add_psOld_memory_pool(PSOldGen* old_gen,
MemoryManager* mgr);
static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
@@ -97,7 +97,7 @@ private:
bool is_heap,
size_t max_size,
bool support_usage_threshold);
static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
static MemoryPool* add_survivor_spaces(DefNewGeneration* young_gen,
const char* name,
bool is_heap,
size_t max_size,
@@ -162,7 +162,6 @@ public:
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause);
static void oops_do(OopClosure* f);
static bool get_verbose() { return PrintGC; }

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,18 +33,18 @@
#include "services/memoryManager.hpp"
#include "services/psMemoryPool.hpp"
PSGenerationPool::PSGenerationPool(PSOldGen* gen,
PSGenerationPool::PSGenerationPool(PSOldGen* old_gen,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
CollectedMemoryPool(name, type, old_gen->capacity_in_bytes(),
old_gen->reserved().byte_size(), support_usage_threshold), _old_gen(old_gen) {
}
MemoryUsage PSGenerationPool::get_memory_usage() {
size_t maxSize = (available_for_allocation() ? max_size() : 0);
size_t used = used_in_bytes();
size_t committed = _gen->capacity_in_bytes();
size_t committed = _old_gen->capacity_in_bytes();
return MemoryUsage(initial_size(), used, committed, maxSize);
}
@@ -55,15 +55,16 @@ MemoryUsage PSGenerationPool::get_memory_usage() {
// Max size of PS eden space is changing due to ergonomic.
// PSYoungGen, PSOldGen, Eden, Survivor spaces are all resizable.
//
EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,
EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
MutableSpace* space,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, space->capacity_in_bytes(),
(gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
(young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes()),
support_usage_threshold),
_gen(gen), _space(space) {
_young_gen(young_gen),
_space(space) {
}
MemoryUsage EdenMutableSpacePool::get_memory_usage() {
@@ -79,13 +80,13 @@ MemoryUsage EdenMutableSpacePool::get_memory_usage() {
//
// PS from and to survivor spaces could have different sizes.
//
SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,
SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),
gen->from_space()->capacity_in_bytes(),
support_usage_threshold), _gen(gen) {
CollectedMemoryPool(name, type, young_gen->from_space()->capacity_in_bytes(),
young_gen->from_space()->capacity_in_bytes(),
support_usage_threshold), _young_gen(young_gen) {
}
MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {
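
The EdenMutableSpacePool hunks above encode a derived maximum: eden may grow into whatever part of the young generation's maximum is not currently committed to the two survivor spaces. A worked example with illustrative numbers, not taken from any real run:

#include <cstddef>

// Worked example of the eden max_size formula above; figures are
// illustrative only.
const size_t M = 1024 * 1024;
size_t young_max      = 64 * M;  // young_gen->max_size()
size_t from_committed =  4 * M;  // from_space()->capacity_in_bytes()
size_t to_committed   =  4 * M;  // to_space()->capacity_in_bytes()
size_t eden_max = young_max - from_committed - to_committed;  // 56 MB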

View File

@@ -39,23 +39,23 @@
class PSGenerationPool : public CollectedMemoryPool {
private:
PSOldGen* _gen;
PSOldGen* _old_gen;
public:
PSGenerationPool(PSOldGen* pool, const char* name, PoolType type, bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return _gen->used_in_bytes(); }
size_t max_size() const { return _gen->reserved().byte_size(); }
size_t used_in_bytes() { return _old_gen->used_in_bytes(); }
size_t max_size() const { return _old_gen->reserved().byte_size(); }
};
class EdenMutableSpacePool : public CollectedMemoryPool {
private:
PSYoungGen* _gen;
PSYoungGen* _young_gen;
MutableSpace* _space;
public:
EdenMutableSpacePool(PSYoungGen* gen,
EdenMutableSpacePool(PSYoungGen* young_gen,
MutableSpace* space,
const char* name,
PoolType type,
@@ -66,16 +66,16 @@ public:
size_t used_in_bytes() { return space()->used_in_bytes(); }
size_t max_size() const {
// Eden's max_size = max_size of Young Gen - the current committed size of survivor spaces
return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
return _young_gen->max_size() - _young_gen->from_space()->capacity_in_bytes() - _young_gen->to_space()->capacity_in_bytes();
}
};
class SurvivorMutableSpacePool : public CollectedMemoryPool {
private:
PSYoungGen* _gen;
PSYoungGen* _young_gen;
public:
SurvivorMutableSpacePool(PSYoungGen* gen,
SurvivorMutableSpacePool(PSYoungGen* young_gen,
const char* name,
PoolType type,
bool support_usage_threshold);
@@ -83,14 +83,14 @@ public:
MemoryUsage get_memory_usage();
size_t used_in_bytes() {
return _gen->from_space()->used_in_bytes();
return _young_gen->from_space()->used_in_bytes();
}
size_t committed_in_bytes() {
return _gen->from_space()->capacity_in_bytes();
return _young_gen->from_space()->capacity_in_bytes();
}
size_t max_size() const {
// Return current committed size of the from-space
return _gen->from_space()->capacity_in_bytes();
return _young_gen->from_space()->capacity_in_bytes();
}
};
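
By the same accounting, the survivor pool's ceiling is simply the committed from-space: from- and to-space may be sized differently, and only from-space holds objects between scavenges. With the illustrative numbers above, used_in_bytes(), committed_in_bytes() and max_size() for the survivor pool would all be bounded by the same 4 MB from-space.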