8057632: Remove auxiliary code used to handle the generations array
Removed next_gen(), prev_gen(), and get_gen().
Reviewed-by: kbarrett, tschatzl

parent a390d5a7cb
commit 7e86840f64
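
The diff below applies one pattern throughout the collectors: positional lookups on GenCollectedHeap (get_gen(i), prev_gen(gen), next_gen(gen)) are replaced by the explicit young_gen() and old_gen() accessors, and the _next_gen fields and parameters are renamed to _old_gen. A minimal illustrative sketch of that call-site change (not a line from the patch; the local variable names are placeholders):

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Before: callers walked the two-element generations array by index
  // or relative position (these helpers are removed by this change):
  //   Generation* young = gch->get_gen(0);
  //   Generation* old   = gch->next_gen(young);

  // After: callers name the generation they mean directly:
  Generation* young = gch->young_gen();
  Generation* old   = gch->old_gen();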
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -186,7 +186,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   cp->space->set_compaction_top(compact_top);
   cp->space = cp->space->next_compaction_space();
   if (cp->space == NULL) {
-    cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
+    cp->gen = GenCollectedHeap::heap()->young_gen();
     assert(cp->gen != NULL, "compaction must succeed");
     cp->space = cp->gen->first_compaction_space();
     assert(cp->space != NULL, "generation must have a first compaction space");
@@ -900,7 +900,6 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   }
 }
 
-
 // Callers of this iterator beware: The closure application should
 // be robust in the face of uninitialized objects and should (always)
 // return a correct size so that the next addr + size below gives us a
|
@@ -369,7 +369,7 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
@@ -626,8 +626,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 
   // Support for parallelizing young gen rescan
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->prev_gen(_cmsGen)->kind() == Generation::ParNew, "CMS can only be used with ParNew");
-  _young_gen = (ParNewGeneration*)gch->prev_gen(_cmsGen);
+  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
+  _young_gen = (ParNewGeneration*)gch->young_gen();
   if (gch->supports_inline_contig_alloc()) {
     _top_addr = gch->top_addr();
     _end_addr = gch->end_addr();
@@ -869,7 +869,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
   if (prev_level >= 0) {
     size_t prev_size = 0;
     GenCollectedHeap* gch = GenCollectedHeap::heap();
-    Generation* prev_gen = gch->get_gen(prev_level);
+    Generation* prev_gen = gch->young_gen();
     prev_size = prev_gen->capacity();
     gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
                            prev_size/1000);
@@ -1049,11 +1049,8 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
     // expand and retry
     size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
-    // Since there's currently no next generation, we don't try to promote
+    // Since this is the old generation, we don't try to promote
     // into a more senior generation.
-    assert(next_gen() == NULL, "assumption, based upon which no attempt "
-                               "is made to pass on a possibly failing "
-                               "promotion to next generation");
     res = _cmsSpace->promote(obj, obj_size);
   }
   if (res != NULL) {
|
@@ -325,7 +325,7 @@ public:
 private:
   ParallelTaskTerminator& _term;
   ParNewGeneration& _gen;
-  Generation& _next_gen;
+  Generation& _old_gen;
 public:
   bool is_valid(int id) const { return id < length(); }
   ParallelTaskTerminator* terminator() { return &_term; }
@@ -338,7 +338,7 @@ ParScanThreadStateSet::ParScanThreadStateSet(
   Stack<oop, mtGC>* overflow_stacks,
   size_t desired_plab_sz, ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _next_gen(old_gen), _term(term)
+    _gen(gen), _old_gen(old_gen), _term(term)
 {
   assert(num_threads > 0, "sanity check!");
   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@@ -471,8 +471,8 @@ void ParScanThreadStateSet::flush()
     _gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
-    _next_gen.par_promote_alloc_done(i);
-    _next_gen.par_oop_since_save_marks_iterate_done(i);
+    _old_gen.par_promote_alloc_done(i);
+    _old_gen.par_oop_since_save_marks_iterate_done(i);
   }
 
   if (UseConcMarkSweepGC) {
@@ -574,10 +574,10 @@ void ParEvacuateFollowersClosure::do_void() {
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                              HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
     AbstractGangTask("ParNewGeneration collection"),
-    _gen(gen), _next_gen(next_gen),
+    _gen(gen), _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set)
   {}
@@ -601,8 +601,6 @@ void ParNewGenTask::work(uint worker_id) {
   // We would need multiple old-gen queues otherwise.
   assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
 
-  Generation* old_gen = gch->next_gen(_gen);
-
   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
   assert(_state_set->is_valid(worker_id), "Should not have been called");
 
@@ -763,8 +761,9 @@ void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier
 class ParNewRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
-  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
-                         Generation& next_gen,
+  ParNewRefProcTaskProxy(ProcessTask& task,
+                         ParNewGeneration& gen,
+                         Generation& old_gen,
                          HeapWord* young_old_boundary,
                          ParScanThreadStateSet& state_set);
 
@@ -776,20 +775,20 @@ private:
 private:
   ParNewGeneration& _gen;
   ProcessTask& _task;
-  Generation& _next_gen;
+  Generation& _old_gen;
   HeapWord* _young_old_boundary;
   ParScanThreadStateSet& _state_set;
 };
 
-ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
-    ProcessTask& task, ParNewGeneration& gen,
-    Generation& next_gen,
+ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
+    ParNewGeneration& gen,
+    Generation& old_gen,
     HeapWord* young_old_boundary,
     ParScanThreadStateSet& state_set)
   : AbstractGangTask("ParNewGeneration parallel reference processing"),
     _gen(gen),
     _task(task),
-    _next_gen(next_gen),
+    _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set)
 {
@@ -893,7 +892,7 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
   from()->set_next_compaction_space(to());
   gch->set_incremental_collection_failed();
   // Inform the next generation that a promotion failure occurred.
-  _next_gen->promotion_failure_occurred();
+  _old_gen->promotion_failure_occurred();
 
   // Trace promotion failure in the parallel GC threads
   thread_state_set.trace_promotion_failed(gc_tracer());
@@ -927,7 +926,7 @@ void ParNewGeneration::collect(bool full,
   workers->set_active_workers(active_workers);
   assert(gch->n_gens() == 2,
          "Par collection currently only works with single older gen.");
-  _next_gen = gch->next_gen(this);
+  _old_gen = gch->old_gen();
 
   // If the next generation is too full to accommodate worst-case promotion
   // from this generation, pass on collection; let the next generation
@@ -968,10 +967,10 @@ void ParNewGeneration::collect(bool full,
   // because only those workers go through the termination protocol.
   ParallelTaskTerminator _term(n_workers, task_queues());
   ParScanThreadStateSet thread_state_set(workers->active_workers(),
-                                         *to(), *this, *_next_gen, *task_queues(),
+                                         *to(), *this, *_old_gen, *task_queues(),
                                          _overflow_stacks, desired_plab_sz(), _term);
 
-  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
+  ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
   gch->set_par_threads(n_workers);
   gch->rem_set()->prepare_for_younger_refs_iterate(true);
   // It turns out that even when we're using 1 thread, doing the work in a
@@ -1191,8 +1190,8 @@ oop ParNewGeneration::copy_to_survivor_space(
   }
 
   if (!_promotion_failed) {
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+    new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
   }
 
   if (new_obj == NULL) {
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,13 +233,13 @@ class ParScanThreadState {
 class ParNewGenTask: public AbstractGangTask {
 private:
   ParNewGeneration* _gen;
-  Generation* _next_gen;
+  Generation* _old_gen;
   HeapWord* _young_old_boundary;
   class ParScanThreadStateSet* _state_set;
 
 public:
   ParNewGenTask(ParNewGeneration* gen,
-                Generation* next_gen,
+                Generation* old_gen,
                 HeapWord* young_old_boundary,
                 ParScanThreadStateSet* state_set);
 
|
@@ -601,7 +601,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
     HandleMark hm; // Discard any handles allocated in each iteration.
 
     // First allocation attempt is lock-free.
-    Generation *young = gch->get_gen(0);
+    Generation *young = gch->young_gen();
     assert(young->supports_inline_contig_alloc(),
            "Otherwise, must do alloc within heap lock");
     if (young->should_allocate(size, is_tlab)) {
@@ -615,8 +615,8 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
     {
       MutexLocker ml(Heap_lock);
       if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
+        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                                " attempting locked slow path allocation");
       }
       // Note that only large objects get a shot at being
       // allocated in later generations.
@@ -705,7 +705,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
     // Give a warning if we seem to be looping forever.
     if ((QueuedAllocationWarningCount > 0) &&
         (try_count % QueuedAllocationWarningCount == 0)) {
-      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
+      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
     }
   }
@@ -715,10 +715,14 @@ HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                        bool is_tlab) {
   GenCollectedHeap *gch = GenCollectedHeap::heap();
   HeapWord* result = NULL;
-  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
-    Generation *gen = gch->get_gen(i);
-    if (gen->should_allocate(size, is_tlab)) {
-      result = gen->expand_and_allocate(size, is_tlab);
+  Generation *old = gch->old_gen();
+  if (old->should_allocate(size, is_tlab)) {
+    result = old->expand_and_allocate(size, is_tlab);
+  }
+  if (result == NULL) {
+    Generation *young = gch->young_gen();
+    if (young->should_allocate(size, is_tlab)) {
+      result = young->expand_and_allocate(size, is_tlab);
     }
   }
   assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
@@ -891,7 +895,7 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
 bool GenCollectorPolicy::should_try_older_generation_allocation(
         size_t word_size) const {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t young_capacity = gch->get_gen(0)->capacity_before_gc();
+  size_t young_capacity = gch->young_gen()->capacity_before_gc();
   return (word_size > heap_word_size(young_capacity))
       || GC_locker::is_active_and_needs_gc()
       || gch->incremental_collection_failed();
|
@@ -226,7 +226,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 
   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
-  _next_gen = NULL;
+  _old_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 
@@ -383,8 +383,8 @@ void DefNewGeneration::compute_new_size() {
   assert(next_level < gch->_n_gens,
          "DefNewGeneration cannot be an oldest gen");
 
-  Generation* next_gen = gch->get_gen(next_level);
-  size_t old_size = next_gen->capacity();
+  Generation* old_gen = gch->old_gen();
+  size_t old_size = old_gen->capacity();
   size_t new_size_before = _virtual_space.committed_size();
   size_t min_new_size = spec()->init_size();
   size_t max_new_size = reserved().byte_size();
@@ -568,7 +568,7 @@ void DefNewGeneration::collect(bool full,
   DefNewTracer gc_tracer;
   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 
-  _next_gen = gch->next_gen(this);
+  _old_gen = gch->old_gen();
 
   // If the next generation is too full to accommodate promotion
   // from this generation, pass on collection; let the next generation
@@ -688,7 +688,7 @@ void DefNewGeneration::collect(bool full,
     gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
-    _next_gen->promotion_failure_occurred();
+    _old_gen->promotion_failure_occurred();
     gc_tracer.report_promotion_failed(_promotion_failed_info);
 
     // Reset the PromotionFailureALot counters.
@@ -793,7 +793,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
 
   // Otherwise try allocating obj tenured
   if (obj == NULL) {
-    obj = _next_gen->promote(old, s);
+    obj = _old_gen->promote(old, s);
     if (obj == NULL) {
       handle_promotion_failure(old);
       return old;
@@ -898,11 +898,11 @@ bool DefNewGeneration::collection_attempt_is_safe() {
     }
     return false;
   }
-  if (_next_gen == NULL) {
+  if (_old_gen == NULL) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
-    _next_gen = gch->next_gen(this);
+    _old_gen = gch->old_gen();
   }
-  return _next_gen->promotion_attempt_is_safe(used());
+  return _old_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
@@ -1022,8 +1022,7 @@ CompactibleSpace* DefNewGeneration::first_compaction_space() const {
   return eden();
 }
 
-HeapWord* DefNewGeneration::allocate(size_t word_size,
-                                     bool is_tlab) {
+HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
   // This is the slow-path allocation for the DefNewGeneration.
   // Most allocations are fast-path in compiled code.
   // We try to allocate from the eden. If that works, we are happy.
@@ -1031,8 +1030,8 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
   // have to use it here, as well.
   HeapWord* result = eden()->par_allocate(word_size);
   if (result != NULL) {
-    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-      _next_gen->sample_eden_chunk();
+    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
+      _old_gen->sample_eden_chunk();
     }
   } else {
     // If the eden is full and the last collection bailed out, we are running
@@ -1047,8 +1046,8 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                          bool is_tlab) {
   HeapWord* res = eden()->par_allocate(word_size);
-  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-    _next_gen->sample_eden_chunk();
+  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
+    _old_gen->sample_eden_chunk();
   }
   return res;
 }
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ class DefNewGeneration: public Generation {
   friend class VMStructs;
 
 protected:
-  Generation* _next_gen;
+  Generation* _old_gen;
   uint _tenuring_threshold; // Tenuring threshold for next collection.
   ageTable _age_table;
   // Size of object to pretenure in words; command line provides bytes
|
@@ -177,18 +177,17 @@ void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
   guarantee(policy->is_generation_policy(), "Illegal policy type");
-  assert((get_gen(0)->kind() == Generation::DefNew) ||
-         (get_gen(0)->kind() == Generation::ParNew),
+  assert((_young_gen->kind() == Generation::DefNew) ||
+         (_young_gen->kind() == Generation::ParNew),
          "Wrong youngest generation type");
-  DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
+  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 
-  Generation* old_gen = get_gen(1);
-  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         old_gen->kind() == Generation::MarkSweepCompact,
+  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
+         _old_gen->kind() == Generation::MarkSweepCompact,
          "Wrong generation kind");
 
   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
-                                 old_gen->capacity(),
+                                 _old_gen->capacity(),
                                  def_new_gen->from()->capacity());
   policy->initialize_gc_policy_counters();
 }
@@ -1113,10 +1112,10 @@ void GenCollectedHeap::print_on_error(outputStream* st) const {
 
 void GenCollectedHeap::print_tracing_info() const {
   if (TraceYoungGenTime) {
-    get_gen(0)->print_summary_info();
+    _young_gen->print_summary_info();
   }
   if (TraceOldGenTime) {
-    get_gen(1)->print_summary_info();
+    _old_gen->print_summary_info();
   }
 }
 
|
@@ -373,27 +373,6 @@ public:
   // collection.
   virtual bool is_maximal_no_gc() const;
 
-  // Return the generation before "gen".
-  Generation* prev_gen(Generation* gen) const {
-    guarantee(gen->level() == 1, "Out of bounds");
-    return _young_gen;
-  }
-
-  // Return the generation after "gen".
-  Generation* next_gen(Generation* gen) const {
-    guarantee(gen->level() == 0, "Out of bounds");
-    return _old_gen;
-  }
-
-  Generation* get_gen(int i) const {
-    guarantee(i == 0 || i == 1, "Out of bounds");
-    if (i == 0) {
-      return _young_gen;
-    } else {
-      return _old_gen;
-    }
-  }
-
   int n_gens() const {
     assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
     return _n_gens;
@@ -486,7 +465,7 @@ public:
     assert(heap()->collector_policy()->is_generation_policy(),
            "the following definition may not be suitable for an n(>2)-generation system");
     return incremental_collection_failed() ||
-           (consult_young && !get_gen(0)->collection_attempt_is_safe());
+           (consult_young && !_young_gen->collection_attempt_is_safe());
   }
 
   // If a generation bails out of an incremental collection,
|
@@ -109,20 +109,16 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
 
   deallocate_stacks();
 
-  // If compaction completely evacuated all generations younger than this
-  // one, then we can clear the card table. Otherwise, we must invalidate
+  // If compaction completely evacuated the young generation then we
+  // can clear the card table. Otherwise, we must invalidate
   // it (consider all cards dirty). In the future, we might consider doing
   // compaction within generations only, and doing card-table sliding.
-  bool all_empty = true;
-  for (int i = 0; all_empty && i < level; i++) {
-    Generation* g = gch->get_gen(i);
-    all_empty = all_empty && gch->get_gen(i)->used() == 0;
-  }
   GenRemSet* rs = gch->rem_set();
-  Generation* old_gen = gch->get_gen(level);
+  Generation* old_gen = gch->old_gen();
 
   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
-  if (all_empty) {
-    // We've evacuated all generations below us.
+  if (gch->young_gen()->used() == 0) {
+    // We've evacuated the young generation.
     rs->clear_into_younger(old_gen);
   } else {
     // Invalidate the cards corresponding to the currently used
@@ -157,9 +153,8 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
 
 void GenMarkSweep::allocate_stacks() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // Scratch request on behalf of oldest generation; will do no
-  // allocation.
-  ScratchBlock* scratch = gch->gather_scratch(gch->get_gen(gch->_n_gens-1), 0);
+  // Scratch request on behalf of old generation; will do no allocation.
+  ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0);
 
   // $$$ To cut a corner, we'll only use the first scratch block, and then
   // revert to malloc.
@@ -188,7 +183,7 @@ void GenMarkSweep::deallocate_stacks() {
 }
 
 void GenMarkSweep::mark_sweep_phase1(int level,
                                      bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
   GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");
@@ -199,7 +194,8 @@ void GenMarkSweep::mark_sweep_phase1(int level,
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  follow_root_closure.set_orig_generation(gch->get_gen(level));
+  assert(level == 1, "We don't use mark-sweep on young generations");
+  follow_root_closure.set_orig_generation(gch->old_gen());
 
   // Need new claim bits before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -287,7 +283,8 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
+  assert(level == 1, "We don't use mark-sweep on young generations.");
+  adjust_pointer_closure.set_orig_generation(gch->old_gen());
 
   gch->gen_process_roots(level,
                          false, // Younger gens are not roots.
|
@@ -153,9 +153,8 @@ bool Generation::is_in(const void* p) const {
 
 Generation* Generation::next_gen() const {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  int next = level() + 1;
-  if (next < gch->_n_gens) {
-    return gch->get_gen(next);
+  if (level() == 0) {
+    return gch->old_gen();
   } else {
     return NULL;
   }
|
@@ -379,7 +379,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
   cp->space->set_compaction_top(compact_top);
   cp->space = cp->space->next_compaction_space();
   if (cp->space == NULL) {
-    cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
+    cp->gen = GenCollectedHeap::heap()->young_gen();
     assert(cp->gen != NULL, "compaction must succeed");
    cp->space = cp->gen->first_compaction_space();
     assert(cp->space != NULL, "generation must have a first compaction space");
|
@@ -536,7 +536,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   nonstatic_field(ContiguousSpace, _concurrent_iteration_safe_limit, HeapWord*) \
   nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
   \
-  nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
+  nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
   nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
   nonstatic_field(DefNewGeneration, _age_table, ageTable) \
   nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,8 +160,8 @@ void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
   _managers_list->append(_minor_gc_manager);
   _managers_list->append(_major_gc_manager);
 
-  add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
-  add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
+  add_generation_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
+  add_generation_memory_pool(heap->old_gen(), _major_gc_manager);
 }
 
 #if INCLUDE_ALL_GCS
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,13 +57,6 @@ private:
     init_code_heap_pools_size = 9
   };
 
-  // index for minor and major generations
-  enum {
-    minor = 0,
-    major = 1,
-    n_gens = 2
-  };
-
   static GrowableArray<MemoryPool*>* _pools_list;
   static GrowableArray<MemoryManager*>* _managers_list;
 
|