This commit is contained in:
Coleen Phillimore 2015-06-17 23:46:35 +00:00
commit fbb07f9b85
45 changed files with 517 additions and 364 deletions

View File

@ -49,7 +49,6 @@ import sun.jvm.hotspot.types.*;
public abstract class Generation extends VMObject { public abstract class Generation extends VMObject {
private static long reservedFieldOffset; private static long reservedFieldOffset;
private static long virtualSpaceFieldOffset; private static long virtualSpaceFieldOffset;
private static CIntegerField levelField;
protected static final int K = 1024; protected static final int K = 1024;
// Fields for class StatRecord // Fields for class StatRecord
private static Field statRecordField; private static Field statRecordField;
@ -75,7 +74,6 @@ public abstract class Generation extends VMObject {
reservedFieldOffset = type.getField("_reserved").getOffset(); reservedFieldOffset = type.getField("_reserved").getOffset();
virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset(); virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
levelField = type.getCIntegerField("_level");
// StatRecord // StatRecord
statRecordField = type.getField("_stat_record"); statRecordField = type.getField("_stat_record");
type = db.lookupType("Generation::StatRecord"); type = db.lookupType("Generation::StatRecord");
@ -130,14 +128,6 @@ public abstract class Generation extends VMObject {
} }
} }
public GenerationSpec spec() {
return ((GenCollectedHeap) VM.getVM().getUniverse().heap()).spec(level());
}
public int level() {
return (int) levelField.getValue(addr);
}
public int invocations() { public int invocations() {
return getStatRecord().getInvocations(); return getStatRecord().getInvocations();
} }

View File

@ -84,11 +84,11 @@ public class PointerLocation {
} }
public boolean isInNewGen() { public boolean isInNewGen() {
return ((gen != null) && (gen.level() == 0)); return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(0)));
} }
public boolean isInOldGen() { public boolean isInOldGen() {
return ((gen != null) && (gen.level() == 1)); return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(1)));
} }
public boolean inOtherGen() { public boolean inOtherGen() {
@ -207,8 +207,6 @@ public class PointerLocation {
tty.print("In new generation "); tty.print("In new generation ");
} else if (isInOldGen()) { } else if (isInOldGen()) {
tty.print("In old generation "); tty.print("In old generation ");
} else if (gen != null) {
tty.print("In Generation " + getGeneration().level());
} else { } else {
tty.print("In unknown section of Java heap"); tty.print("In unknown section of Java heap");
} }

View File

@ -1654,7 +1654,7 @@ void os::print_memory_info(outputStream* st) {
} }
} }
void os::pd_print_cpu_info(outputStream* st) { void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// cpu // cpu
st->print("CPU:"); st->print("CPU:");
st->print("total %d", os::processor_count()); st->print("total %d", os::processor_count());

View File

@ -1708,7 +1708,7 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_load_average(st); os::Posix::print_load_average(st);
} }
void os::pd_print_cpu_info(outputStream* st) { void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now. // Nothing to do for now.
} }

View File

@ -2216,12 +2216,52 @@ void os::print_memory_info(outputStream* st) {
st->cr(); st->cr();
} }
void os::pd_print_cpu_info(outputStream* st) { // Print the first "model name" line and the first "flags" line
st->print("\n/proc/cpuinfo:\n"); // that we find and nothing more. We assume "model name" comes
if (!_print_ascii_file("/proc/cpuinfo", st)) { // before "flags" so if we find a second "model name", then the
st->print(" <Not Available>"); // "flags" field is considered missing.
static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
#if defined(IA32) || defined(AMD64)
// Other platforms have less repetitive cpuinfo files
FILE *fp = fopen("/proc/cpuinfo", "r");
if (fp) {
while (!feof(fp)) {
if (fgets(buf, buflen, fp)) {
// Assume model name comes before flags
bool model_name_printed = false;
if (strstr(buf, "model name") != NULL) {
if (!model_name_printed) {
st->print_raw("\nCPU Model and flags from /proc/cpuinfo:\n");
st->print_raw(buf);
model_name_printed = true;
} else {
// model name printed but not flags? Odd, just return
fclose(fp);
return true;
}
}
// print the flags line too
if (strstr(buf, "flags") != NULL) {
st->print_raw(buf);
fclose(fp);
return true;
}
}
}
fclose(fp);
}
#endif // x86 platforms
return false;
}
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Only print the model name if the platform provides this as a summary
if (!print_model_name_and_flags(st, buf, buflen)) {
st->print("\n/proc/cpuinfo:\n");
if (!_print_ascii_file("/proc/cpuinfo", st)) {
st->print_cr(" <Not Available>");
}
} }
st->cr();
} }
void os::print_siginfo(outputStream* st, void* siginfo) { void os::print_siginfo(outputStream* st, void* siginfo) {

View File

@ -1997,7 +1997,7 @@ static bool check_addr0(outputStream* st) {
return status; return status;
} }
void os::pd_print_cpu_info(outputStream* st) { void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now. // Nothing to do for now.
} }

View File

@ -1733,7 +1733,7 @@ void os::win32::print_windows_version(outputStream* st) {
st->cr(); st->cr();
} }
void os::pd_print_cpu_info(outputStream* st) { void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now. // Nothing to do for now.
} }

View File

@ -190,10 +190,10 @@ class CMSParGCThreadState: public CHeapObj<mtGC> {
}; };
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration( ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
ReservedSpace rs, size_t initial_byte_size, int level, ReservedSpace rs, size_t initial_byte_size,
CardTableRS* ct, bool use_adaptive_freelists, CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) : FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct), CardGeneration(rs, initial_byte_size, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_did_compact(false) _did_compact(false)
{ {
@ -682,12 +682,17 @@ void ConcurrentMarkSweepGeneration::print_statistics() {
void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) { void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
if (PrintGCDetails) { if (PrintGCDetails) {
// I didn't want to change the logging when removing the level concept,
// but I guess this logging could say "old" or something instead of "1".
assert(gch->is_old_gen(this),
"The CMS generation should be the old generation");
uint level = 1;
if (Verbose) { if (Verbose) {
gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
level(), short_name(), s, used(), capacity()); level, short_name(), s, used(), capacity());
} else { } else {
gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
level(), short_name(), s, used() / K, capacity() / K); level, short_name(), s, used() / K, capacity() / K);
} }
} }
if (Verbose) { if (Verbose) {
@ -797,27 +802,22 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
gclog_or_tty->print_cr("\nFrom compute_new_size: "); gclog_or_tty->print_cr("\nFrom compute_new_size: ");
gclog_or_tty->print_cr(" Free fraction %f", free_percentage); gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f", gclog_or_tty->print_cr(" Desired free fraction %f",
desired_free_percentage); desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f", gclog_or_tty->print_cr(" Maximum free fraction %f",
maximum_free_percentage); maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000); gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
desired_capacity/1000); desired_capacity/1000);
int prev_level = level() - 1; GenCollectedHeap* gch = GenCollectedHeap::heap();
if (prev_level >= 0) { assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
size_t prev_size = 0; size_t young_size = gch->young_gen()->capacity();
GenCollectedHeap* gch = GenCollectedHeap::heap(); gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
Generation* prev_gen = gch->young_gen();
prev_size = prev_gen->capacity();
gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
prev_size/1000);
}
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT, gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
unsafe_max_alloc_nogc()/1000); unsafe_max_alloc_nogc()/1000);
gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT, gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
contiguous_available()/1000); contiguous_available()/1000);
gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
expand_bytes); expand_bytes);
} }
// safe if expansion fails // safe if expansion fails
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
@ -1650,8 +1650,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_intra_sweep_estimate.padded_average()); _intra_sweep_estimate.padded_average());
} }
GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
ref_processor(), clear_all_soft_refs);
#ifdef ASSERT #ifdef ASSERT
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
size_t free_size = cms_space->free(); size_t free_size = cms_space->free();
@ -2432,7 +2431,7 @@ void CMSCollector::verify_after_remark_work_1() {
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
_cmsGen->level(), GenCollectedHeap::OldGen,
true, // younger gens are roots true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()), GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(), should_unload_classes(),
@ -2504,7 +2503,7 @@ void CMSCollector::verify_after_remark_work_2() {
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
_cmsGen->level(), GenCollectedHeap::OldGen,
true, // younger gens are roots true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()), GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(), should_unload_classes(),
@ -3031,7 +3030,7 @@ void CMSCollector::checkpointRootsInitialWork() {
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
_cmsGen->level(), GenCollectedHeap::OldGen,
true, // younger gens are roots true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()), GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(), should_unload_classes(),
@ -4282,15 +4281,12 @@ void CMSCollector::checkpointRootsFinal() {
FlagSetting fl(gch->_is_gc_active, false); FlagSetting fl(gch->_is_gc_active, false);
NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
int level = _cmsGen->level() - 1; gch->do_collection(true, // full (i.e. force, see below)
if (level >= 0) { false, // !clear_all_soft_refs
gch->do_collection(true, // full (i.e. force, see below) 0, // size
false, // !clear_all_soft_refs false, // is_tlab
0, // size GenCollectedHeap::YoungGen // type
false, // is_tlab );
level // max_level
);
}
} }
FreelistLocker x(this); FreelistLocker x(this);
MutexLockerEx y(bitMapLock(), MutexLockerEx y(bitMapLock(),
@ -4464,7 +4460,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
CLDToOopClosure cld_closure(&par_mri_cl, true); CLDToOopClosure cld_closure(&par_mri_cl, true);
gch->gen_process_roots(_strong_roots_scope, gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(), GenCollectedHeap::OldGen,
false, // yg was scanned above false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(), _collector->should_unload_classes(),
@ -4603,7 +4599,7 @@ void CMSParRemarkTask::work(uint worker_id) {
_timer.reset(); _timer.reset();
_timer.start(); _timer.start();
gch->gen_process_roots(_strong_roots_scope, gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(), GenCollectedHeap::OldGen,
false, // yg was scanned above false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(), _collector->should_unload_classes(),
@ -5184,7 +5180,7 @@ void CMSCollector::do_remark_non_parallel() {
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
_cmsGen->level(), GenCollectedHeap::OldGen,
true, // younger gens as roots true, // younger gens as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()), GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(), should_unload_classes(),
@ -5648,11 +5644,12 @@ FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
return _cmsSpace->find_chunk_at_end(); return _cmsSpace->find_chunk_at_end();
} }
void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level, void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
bool full) { bool full) {
// The next lower level has been collected. Gather any statistics // If the young generation has been collected, gather any statistics
// that are of interest at this point. // that are of interest at this point.
if (!full && (current_level + 1) == level()) { bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Gather statistics on the young generation collection. // Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used()); collector()->stats().record_gc0_end(used());
} }

View File

@ -1063,7 +1063,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void shrink_free_list_by(size_t bytes); void shrink_free_list_by(size_t bytes);
// Update statistics for GC // Update statistics for GC
virtual void update_gc_stats(int level, bool full); virtual void update_gc_stats(Generation* current_generation, bool full);
// Maximum available space in the generation (including uncommitted) // Maximum available space in the generation (including uncommitted)
// space. // space.
@ -1079,7 +1079,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
public: public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct, CardTableRS* ct,
bool use_adaptive_freelists, bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice); FreeBlockDictionary<FreeChunk>::DictionaryChoice);

View File

@ -62,25 +62,25 @@
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif #endif
ParScanThreadState::ParScanThreadState(Space* to_space_, ParScanThreadState::ParScanThreadState(Space* to_space_,
ParNewGeneration* gen_, ParNewGeneration* young_gen_,
Generation* old_gen_, Generation* old_gen_,
int thread_num_, int thread_num_,
ObjToScanQueueSet* work_queue_set_, ObjToScanQueueSet* work_queue_set_,
Stack<oop, mtGC>* overflow_stacks_, Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_, size_t desired_plab_sz_,
ParallelTaskTerminator& term_) : ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_), _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false), _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data. _ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_), _to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(gen_, this), _old_gen_closure(gen_, this), _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
_to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this), _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
_older_gen_closure(gen_, this), _older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure, _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
&_to_space_root_closure, gen_, &_old_gen_root_closure, &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_), work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this), _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure), _keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0) _strong_roots_time(0.0), _term_time(0.0)
{ {
@ -481,7 +481,6 @@ ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) : ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{ {
assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -566,11 +565,11 @@ void ParEvacuateFollowersClosure::do_void() {
par_scan_state()->end_term_time(); par_scan_state()->end_term_time();
} }
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen, ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set, HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) : StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"), AbstractGangTask("ParNewGeneration collection"),
_gen(gen), _old_gen(old_gen), _young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary), _young_old_boundary(young_old_boundary),
_state_set(state_set), _state_set(state_set),
_strong_roots_scope(strong_roots_scope) _strong_roots_scope(strong_roots_scope)
@ -596,7 +595,7 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.start_strong_roots(); par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope, gch->gen_process_roots(_strong_roots_scope,
_gen->level(), GenCollectedHeap::YoungGen,
true, // Process younger gens, if any, true, // Process younger gens, if any,
// as strong roots. // as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache, GenCollectedHeap::SO_ScavengeCodeCache,
@ -616,8 +615,8 @@ void ParNewGenTask::work(uint worker_id) {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif #endif
ParNewGeneration:: ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level) ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, level, "PCopy"), : DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL), _overflow_list(NULL),
_is_alive_closure(this), _is_alive_closure(this),
_plab_stats(YoungPLABSize, PLABWeight) _plab_stats(YoungPLABSize, PLABWeight)
@ -752,7 +751,7 @@ public:
private: private:
virtual void work(uint worker_id); virtual void work(uint worker_id);
private: private:
ParNewGeneration& _gen; ParNewGeneration& _young_gen;
ProcessTask& _task; ProcessTask& _task;
Generation& _old_gen; Generation& _old_gen;
HeapWord* _young_old_boundary; HeapWord* _young_old_boundary;
@ -760,12 +759,12 @@ private:
}; };
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
ParNewGeneration& gen, ParNewGeneration& young_gen,
Generation& old_gen, Generation& old_gen,
HeapWord* young_old_boundary, HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set) ParScanThreadStateSet& state_set)
: AbstractGangTask("ParNewGeneration parallel reference processing"), : AbstractGangTask("ParNewGeneration parallel reference processing"),
_gen(gen), _young_gen(young_gen),
_task(task), _task(task),
_old_gen(old_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary), _young_old_boundary(young_old_boundary),
@ -806,12 +805,12 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
FlexibleWorkGang* workers = gch->workers(); FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads."); assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _generation.promotion_failed()); _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(), ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
_generation.reserved().end(), _state_set); _young_gen.reserved().end(), _state_set);
workers->run_task(&rp_task); workers->run_task(&rp_task);
_state_set.reset(0 /* bad value in debug if not reset */, _state_set.reset(0 /* bad value in debug if not reset */,
_generation.promotion_failed()); _young_gen.promotion_failed());
} }
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@ -835,10 +834,10 @@ ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
ScanClosure(g, gc_barrier) {} ScanClosure(g, gc_barrier) {}
EvacuateFollowersClosureGeneral:: EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur, OopsInGenClosure* cur,
OopsInGenClosure* older) : OopsInGenClosure* older) :
_gch(gch), _level(level), _gch(gch),
_scan_cur_or_nonheap(cur), _scan_older(older) _scan_cur_or_nonheap(cur), _scan_older(older)
{} {}
@ -846,10 +845,10 @@ void EvacuateFollowersClosureGeneral::do_void() {
do { do {
// Beware: this call will lead to closure applications via virtual // Beware: this call will lead to closure applications via virtual
// calls. // calls.
_gch->oop_since_save_marks_iterate(_level, _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
_scan_cur_or_nonheap, _scan_cur_or_nonheap,
_scan_older); _scan_older);
} while (!_gch->no_allocs_since_save_marks(_level)); } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
} }
@ -972,14 +971,14 @@ void ParNewGeneration::collect(bool full,
ScanClosure scan_without_gc_barrier(this, false); ScanClosure scan_without_gc_barrier(this, false);
ScanClosureWithParBarrier scan_with_gc_barrier(this, true); ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, EvacuateFollowersClosureGeneral evacuate_followers(gch,
&scan_without_gc_barrier, &scan_with_gc_barrier); &scan_without_gc_barrier, &scan_with_gc_barrier);
rp->setup_policy(clear_all_soft_refs); rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)? // Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers); rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats; ReferenceProcessorStats stats;
if (rp->processing_is_mt()) { if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive, stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, &task_executor, &evacuate_followers, &task_executor,
_gc_timer, _gc_tracer.gc_id()); _gc_timer, _gc_tracer.gc_id());
@ -1045,7 +1044,7 @@ void ParNewGeneration::collect(bool full,
rp->set_enqueuing_is_done(true); rp->set_enqueuing_is_done(true);
if (rp->processing_is_mt()) { if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
rp->enqueue_discovered_references(&task_executor); rp->enqueue_discovered_references(&task_executor);
} else { } else {
rp->enqueue_discovered_references(NULL); rp->enqueue_discovered_references(NULL);

View File

@ -234,14 +234,14 @@ class ParScanThreadState {
class ParNewGenTask: public AbstractGangTask { class ParNewGenTask: public AbstractGangTask {
private: private:
ParNewGeneration* _gen; ParNewGeneration* _young_gen;
Generation* _old_gen; Generation* _old_gen;
HeapWord* _young_old_boundary; HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set; class ParScanThreadStateSet* _state_set;
StrongRootsScope* _strong_roots_scope; StrongRootsScope* _strong_roots_scope;
public: public:
ParNewGenTask(ParNewGeneration* gen, ParNewGenTask(ParNewGeneration* young_gen,
Generation* old_gen, Generation* old_gen,
HeapWord* young_old_boundary, HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set, ParScanThreadStateSet* state_set,
@ -264,11 +264,10 @@ class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
class EvacuateFollowersClosureGeneral: public VoidClosure { class EvacuateFollowersClosureGeneral: public VoidClosure {
private: private:
GenCollectedHeap* _gch; GenCollectedHeap* _gch;
int _level;
OopsInGenClosure* _scan_cur_or_nonheap; OopsInGenClosure* _scan_cur_or_nonheap;
OopsInGenClosure* _scan_older; OopsInGenClosure* _scan_older;
public: public:
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur, OopsInGenClosure* cur,
OopsInGenClosure* older); OopsInGenClosure* older);
virtual void do_void(); virtual void do_void();
@ -288,12 +287,14 @@ class ScanClosureWithParBarrier: public ScanClosure {
// Implements AbstractRefProcTaskExecutor for ParNew. // Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor { class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private: private:
ParNewGeneration& _generation; ParNewGeneration& _young_gen;
Generation& _old_gen;
ParScanThreadStateSet& _state_set; ParScanThreadStateSet& _state_set;
public: public:
ParNewRefProcTaskExecutor(ParNewGeneration& generation, ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
Generation& old_gen,
ParScanThreadStateSet& state_set) ParScanThreadStateSet& state_set)
: _generation(generation), _state_set(state_set) : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
{ } { }
// Executes a task using worker threads. // Executes a task using worker threads.
@ -353,7 +354,7 @@ class ParNewGeneration: public DefNewGeneration {
void set_survivor_overflow(bool v) { _survivor_overflow = v; } void set_survivor_overflow(bool v) { _survivor_overflow = v; }
public: public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level); ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
~ParNewGeneration() { ~ParNewGeneration() {
for (uint i = 0; i < ParallelGCThreads; i++) for (uint i = 0; i < ParallelGCThreads; i++)

View File

@ -72,7 +72,7 @@ inline void ParScanClosure::do_oop_work(T* p,
bool root_scan) { bool root_scan) {
assert((!GenCollectedHeap::heap()->is_in_reserved(p) || assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p)) generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier), && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
"The gen must be right, and we must be doing the barrier " "The gen must be right, and we must be doing the barrier "
"in older generations."); "in older generations.");
T heap_oop = oopDesc::load_heap_oop(p); T heap_oop = oopDesc::load_heap_oop(p);

View File

@ -198,8 +198,7 @@ void VM_GenCollectFullConcurrent::doit() {
assert(SafepointSynchronize::is_at_safepoint(), assert(SafepointSynchronize::is_at_safepoint(),
"We can only be executing this arm of if at a safepoint"); "We can only be executing this arm of if at a safepoint");
GCCauseSetter gccs(gch, _gc_cause); GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(), gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
0 /* collect only youngest gen */);
} // Else no need for a foreground young gc } // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) || assert((_gc_count_before < gch->total_collections()) ||
(GC_locker::is_active() /* gc may have been skipped */ (GC_locker::is_active() /* gc may have been skipped */

View File

@ -1304,7 +1304,7 @@ void PSAdaptiveSizePolicy::update_averages(bool is_survivor_overflow,
size_t survived_guess = survived + promoted; size_t survived_guess = survived + promoted;
_avg_survived->sample(survived_guess); _avg_survived->sample(survived_guess);
} }
avg_promoted()->sample(promoted + _avg_pretenured->padded_average()); avg_promoted()->sample(promoted);
if (PrintAdaptiveSizePolicy) { if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print_cr( gclog_or_tty->print_cr(

View File

@ -199,7 +199,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) {
// Allocations in the old generation need to be reported // Allocations in the old generation need to be reported
if (res != NULL) { if (res != NULL) {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
heap->size_policy()->tenured_allocation(word_size); heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
} }
return res; return res;

View File

@ -58,11 +58,13 @@
// Methods of protected closure types. // Methods of protected closure types.
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
assert(g->level() == 0, "Optimized for youngest gen."); assert(_young_gen->kind() == Generation::ParNew ||
_young_gen->kind() == Generation::DefNew, "Expected the young generation here");
} }
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
} }
DefNewGeneration::KeepAliveClosure:: DefNewGeneration::KeepAliveClosure::
@ -85,39 +87,38 @@ void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGenera
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::EvacuateFollowersClosure:: DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level, EvacuateFollowersClosure(GenCollectedHeap* gch,
ScanClosure* cur, ScanClosure* older) : ScanClosure* cur,
_gch(gch), _level(level), ScanClosure* older) :
_scan_cur_or_nonheap(cur), _scan_older(older) _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{} {}
void DefNewGeneration::EvacuateFollowersClosure::do_void() { void DefNewGeneration::EvacuateFollowersClosure::do_void() {
do { do {
_gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
_scan_older); } while (!_gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen));
} while (!_gch->no_allocs_since_save_marks(_level));
} }
DefNewGeneration::FastEvacuateFollowersClosure:: DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, FastEvacuateFollowersClosure(GenCollectedHeap* gch,
DefNewGeneration* gen, FastScanClosure* cur,
FastScanClosure* cur, FastScanClosure* older) : FastScanClosure* older) :
_gch(gch), _level(level), _gen(gen), _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
_scan_cur_or_nonheap(cur), _scan_older(older) {
{} assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
_gen = (DefNewGeneration*)_gch->young_gen();
}
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
do { do {
_gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
_scan_older); } while (!_gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen));
} while (!_gch->no_allocs_since_save_marks(_level));
guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
} }
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{ {
assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -127,7 +128,6 @@ void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{ {
assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -168,7 +168,6 @@ void KlassScanClosure::do_klass(Klass* klass) {
ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
_g(g) _g(g)
{ {
assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -186,9 +185,8 @@ KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
DefNewGeneration::DefNewGeneration(ReservedSpace rs, DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size, size_t initial_size,
int level,
const char* policy) const char* policy)
: Generation(rs, initial_size, level), : Generation(rs, initial_size),
_promo_failure_drain_in_progress(false), _promo_failure_drain_in_progress(false),
_should_allocate_from_space(false) _should_allocate_from_space(false)
{ {
@ -372,22 +370,18 @@ bool DefNewGeneration::expand(size_t bytes) {
return success; return success;
} }
void DefNewGeneration::compute_new_size() { void DefNewGeneration::compute_new_size() {
// This is called after a gc that includes the following generation // This is called after a GC that includes the old generation, so from-space
// (which is required to exist.) So from-space will normally be empty. // will normally be empty.
// Note that we check both spaces, since if scavenge failed they revert roles. // Note that we check both spaces, since if scavenge failed they revert roles.
// If not we bail out (otherwise we would have to relocate the objects) // If not we bail out (otherwise we would have to relocate the objects).
if (!from()->is_empty() || !to()->is_empty()) { if (!from()->is_empty() || !to()->is_empty()) {
return; return;
} }
int next_level = level() + 1;
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(next_level == 1, "DefNewGeneration must be a young gen");
Generation* old_gen = gch->old_gen(); size_t old_size = gch->old_gen()->capacity();
size_t old_size = old_gen->capacity();
size_t new_size_before = _virtual_space.committed_size(); size_t new_size_before = _virtual_space.committed_size();
size_t min_new_size = spec()->init_size(); size_t min_new_size = spec()->init_size();
size_t max_new_size = reserved().byte_size(); size_t max_new_size = reserved().byte_size();
@ -603,7 +597,7 @@ void DefNewGeneration::collect(bool full,
gch->rem_set()->prepare_for_younger_refs_iterate(false); gch->rem_set()->prepare_for_younger_refs_iterate(false);
assert(gch->no_allocs_since_save_marks(0), assert(gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen),
"save marks have not been newly set."); "save marks have not been newly set.");
// Not very pretty. // Not very pretty.
@ -619,11 +613,11 @@ void DefNewGeneration::collect(bool full,
false); false);
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, FastEvacuateFollowersClosure evacuate_followers(gch,
&fsc_with_no_gc_barrier, &fsc_with_no_gc_barrier,
&fsc_with_gc_barrier); &fsc_with_gc_barrier);
assert(gch->no_allocs_since_save_marks(0), assert(gch->no_allocs_since_save_marks(GenCollectedHeap::YoungGen),
"save marks have not been newly set."); "save marks have not been newly set.");
{ {
@ -633,7 +627,7 @@ void DefNewGeneration::collect(bool full,
StrongRootsScope srs(0); StrongRootsScope srs(0);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
_level, GenCollectedHeap::YoungGen,
true, // Process younger gens, if any, true, // Process younger gens, if any,
// as strong roots. // as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache, GenCollectedHeap::SO_ScavengeCodeCache,
@ -870,8 +864,10 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words) { size_t max_alloc_words) {
if (requestor == this || _promotion_failed) return; if (requestor == this || _promotion_failed) {
assert(requestor->level() > level(), "DefNewGeneration must be youngest"); return;
}
assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
/* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
if (to_space->top() > to_space->bottom()) { if (to_space->top() > to_space->bottom()) {

View File

@ -154,9 +154,9 @@ protected:
public: // was "protected" but caused compile error on win32 public: // was "protected" but caused compile error on win32
class IsAliveClosure: public BoolObjectClosure { class IsAliveClosure: public BoolObjectClosure {
Generation* _g; Generation* _young_gen;
public: public:
IsAliveClosure(Generation* g); IsAliveClosure(Generation* young_gen);
bool do_object_b(oop p); bool do_object_b(oop p);
}; };
@ -183,31 +183,28 @@ protected:
class EvacuateFollowersClosure: public VoidClosure { class EvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch; GenCollectedHeap* _gch;
int _level;
ScanClosure* _scan_cur_or_nonheap; ScanClosure* _scan_cur_or_nonheap;
ScanClosure* _scan_older; ScanClosure* _scan_older;
public: public:
EvacuateFollowersClosure(GenCollectedHeap* gch, int level, EvacuateFollowersClosure(GenCollectedHeap* gch,
ScanClosure* cur, ScanClosure* older); ScanClosure* cur, ScanClosure* older);
void do_void(); void do_void();
}; };
class FastEvacuateFollowersClosure: public VoidClosure { class FastEvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch; GenCollectedHeap* _gch;
int _level;
DefNewGeneration* _gen; DefNewGeneration* _gen;
FastScanClosure* _scan_cur_or_nonheap; FastScanClosure* _scan_cur_or_nonheap;
FastScanClosure* _scan_older; FastScanClosure* _scan_older;
public: public:
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, FastEvacuateFollowersClosure(GenCollectedHeap* gch,
DefNewGeneration* gen,
FastScanClosure* cur, FastScanClosure* cur,
FastScanClosure* older); FastScanClosure* older);
void do_void(); void do_void();
}; };
public: public:
DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level, DefNewGeneration(ReservedSpace rs, size_t initial_byte_size,
const char* policy="Copy"); const char* policy="Copy");
virtual void ref_processor_init(); virtual void ref_processor_init();

View File

@ -36,6 +36,7 @@
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp" #include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp" #include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp" #include "gc/shared/referencePolicy.hpp"
@ -53,8 +54,7 @@
#include "utilities/events.hpp" #include "utilities/events.hpp"
#include "utilities/stack.inline.hpp" #include "utilities/stack.inline.hpp"
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) { void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
guarantee(level == 1, "We always collect both old and young.");
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -87,11 +87,11 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
// Capture used regions for each generation that will be // Capture used regions for each generation that will be
// subject to collection, so that card table adjustments can // subject to collection, so that card table adjustments can
// be made intelligently (see clear / invalidate further below). // be made intelligently (see clear / invalidate further below).
gch->save_used_regions(level); gch->save_used_regions();
allocate_stacks(); allocate_stacks();
mark_sweep_phase1(level, clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs);
mark_sweep_phase2(); mark_sweep_phase2();
@ -99,7 +99,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
mark_sweep_phase3(level); mark_sweep_phase3();
mark_sweep_phase4(); mark_sweep_phase4();
@ -184,8 +184,7 @@ void GenMarkSweep::deallocate_stacks() {
_objarray_stack.clear(true); _objarray_stack.clear(true);
} }
void GenMarkSweep::mark_sweep_phase1(int level, void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them // Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
@ -195,7 +194,6 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// use OopsInGenClosure constructor which takes a generation, // use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors // as the Universe has not been created when the static constructors
// are run. // are run.
assert(level == 1, "We don't use mark-sweep on young generations");
follow_root_closure.set_orig_generation(gch->old_gen()); follow_root_closure.set_orig_generation(gch->old_gen());
// Need new claim bits before marking starts. // Need new claim bits before marking starts.
@ -205,10 +203,10 @@ void GenMarkSweep::mark_sweep_phase1(int level,
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
level, GenCollectedHeap::OldGen,
false, // Younger gens are not roots. false, // Younger gens are not roots.
GenCollectedHeap::SO_None, GenCollectedHeap::SO_None,
GenCollectedHeap::StrongRootsOnly, ClassUnloading,
&follow_root_closure, &follow_root_closure,
&follow_root_closure, &follow_root_closure,
&follow_cld_closure); &follow_cld_closure);
@ -273,7 +271,7 @@ public:
} }
}; };
void GenMarkSweep::mark_sweep_phase3(int level) { void GenMarkSweep::mark_sweep_phase3() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations // Adjust the pointers to reflect the new locations
@ -286,14 +284,13 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
// use OopsInGenClosure constructor which takes a generation, // use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors // as the Universe has not been created when the static constructors
// are run. // are run.
assert(level == 1, "We don't use mark-sweep on young generations.");
adjust_pointer_closure.set_orig_generation(gch->old_gen()); adjust_pointer_closure.set_orig_generation(gch->old_gen());
{ {
StrongRootsScope srs(1); StrongRootsScope srs(1);
gch->gen_process_roots(&srs, gch->gen_process_roots(&srs,
level, GenCollectedHeap::OldGen,
false, // Younger gens are not roots. false, // Younger gens are not roots.
GenCollectedHeap::SO_AllCodeCache, GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots, GenCollectedHeap::StrongAndWeakRoots,

View File

@ -31,17 +31,16 @@ class GenMarkSweep : public MarkSweep {
friend class VM_MarkSweep; friend class VM_MarkSweep;
friend class G1MarkSweep; friend class G1MarkSweep;
public: public:
static void invoke_at_safepoint(int level, ReferenceProcessor* rp, static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs);
bool clear_all_softrefs);
private: private:
// Mark live objects // Mark live objects
static void mark_sweep_phase1(int level, bool clear_all_softrefs); static void mark_sweep_phase1(bool clear_all_softrefs);
// Calculate new addresses // Calculate new addresses
static void mark_sweep_phase2(); static void mark_sweep_phase2();
// Update pointers // Update pointers
static void mark_sweep_phase3(int level); static void mark_sweep_phase3();
// Move objects to new positions // Move objects to new positions
static void mark_sweep_phase4(); static void mark_sweep_phase4();

View File

@ -41,9 +41,9 @@
#endif #endif
TenuredGeneration::TenuredGeneration(ReservedSpace rs, TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size, int level, size_t initial_byte_size,
GenRemSet* remset) : GenRemSet* remset) :
CardGeneration(rs, initial_byte_size, level, remset) CardGeneration(rs, initial_byte_size, remset)
{ {
HeapWord* bottom = (HeapWord*) _virtual_space.low(); HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high(); HeapWord* end = (HeapWord*) _virtual_space.high();
@ -134,11 +134,12 @@ void TenuredGeneration::compute_new_size() {
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity())); " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
} }
void TenuredGeneration::update_gc_stats(int current_level, void TenuredGeneration::update_gc_stats(Generation* current_generation,
bool full) { bool full) {
// If the next lower level(s) has been collected, gather any statistics // If the young generation has been collected, gather any statistics
// that are of interest at this point. // that are of interest at this point.
if (!full && (current_level + 1) == level()) { bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Calculate size of data promoted from the younger generations // Calculate size of data promoted from the younger generations
// before doing the collection. // before doing the collection.
size_t used_before_gc = used(); size_t used_before_gc = used();
@ -192,7 +193,7 @@ void TenuredGeneration::collect(bool full,
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
gc_timer->register_gc_end(); gc_timer->register_gc_end();

View File

@ -55,8 +55,9 @@ class TenuredGeneration: public CardGeneration {
void assert_correct_size_change_locking(); void assert_correct_size_change_locking();
public: public:
TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, TenuredGeneration(ReservedSpace rs,
int level, GenRemSet* remset); size_t initial_byte_size,
GenRemSet* remset);
Generation::Name kind() { return Generation::MarkSweepCompact; } Generation::Name kind() { return Generation::MarkSweepCompact; }
@ -120,7 +121,7 @@ class TenuredGeneration: public CardGeneration {
// Statistics // Statistics
virtual void update_gc_stats(int level, bool full); virtual void update_gc_stats(Generation* current_generation, bool full);
virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const; virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;

View File

@ -35,10 +35,10 @@
#include "memory/memRegion.hpp" #include "memory/memRegion.hpp"
#include "runtime/java.hpp" #include "runtime/java.hpp"
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, CardGeneration::CardGeneration(ReservedSpace rs,
int level, size_t initial_byte_size,
GenRemSet* remset) : GenRemSet* remset) :
Generation(rs, initial_byte_size, level), _rs(remset), Generation(rs, initial_byte_size), _rs(remset),
_shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue() _used_at_prologue()
{ {

View File

@ -52,8 +52,7 @@ class CardGeneration: public Generation {
size_t _capacity_at_prologue; size_t _capacity_at_prologue;
size_t _used_at_prologue; size_t _used_at_prologue;
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
GenRemSet* remset);
virtual void assert_correct_size_change_locking() = 0; virtual void assert_correct_size_change_locking() = 0;

View File

@ -104,7 +104,9 @@ void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
void CardTableRS::younger_refs_iterate(Generation* g, void CardTableRS::younger_refs_iterate(Generation* g,
OopsInGenClosure* blk, OopsInGenClosure* blk,
uint n_threads) { uint n_threads) {
_last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val(); // The indexing in this array is slightly odd. We want to access
// the old generation record here, which is at index 2.
_last_cur_val_in_gen[2] = cur_youngergen_card_val();
g->younger_refs_iterate(blk, n_threads); g->younger_refs_iterate(blk, n_threads);
} }
@ -300,7 +302,8 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
} }
void CardTableRS::clear_into_younger(Generation* old_gen) { void CardTableRS::clear_into_younger(Generation* old_gen) {
assert(old_gen->level() == 1, "Should only be called for the old generation"); assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
"Should only be called for the old generation");
// The card tables for the youngest gen need never be cleared. // The card tables for the youngest gen need never be cleared.
// There's a bit of subtlety in the clear() and invalidate() // There's a bit of subtlety in the clear() and invalidate()
// methods that we exploit here and in invalidate_or_clear() // methods that we exploit here and in invalidate_or_clear()
@ -311,7 +314,8 @@ void CardTableRS::clear_into_younger(Generation* old_gen) {
} }
void CardTableRS::invalidate_or_clear(Generation* old_gen) { void CardTableRS::invalidate_or_clear(Generation* old_gen) {
assert(old_gen->level() == 1, "Should only be called for the old generation"); assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
"Should only be called for the old generation");
// Invalidate the cards for the currently occupied part of // Invalidate the cards for the currently occupied part of
// the old generation and clear the cards for the // the old generation and clear the cards for the
// unoccupied part of the generation (if any, making use // unoccupied part of the generation (if any, making use
@ -377,7 +381,9 @@ public:
VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {} VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
void do_generation(Generation* gen) { void do_generation(Generation* gen) {
// Skip the youngest generation. // Skip the youngest generation.
if (gen->level() == 0) return; if (GenCollectedHeap::heap()->is_young_gen(gen)) {
return;
}
// Normally, we're interested in pointers to younger generations. // Normally, we're interested in pointers to younger generations.
VerifyCTSpaceClosure blk(_ct, gen->reserved().start()); VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
gen->space_iterate(&blk, true); gen->space_iterate(&blk, true);

View File

@ -76,9 +76,8 @@ class CardTableRS: public GenRemSet {
// An array that contains, for each generation, the card table value last // An array that contains, for each generation, the card table value last
// used as the current value for a younger_refs_do iteration of that // used as the current value for a younger_refs_do iteration of that
// portion of the table. (The perm gen is index 0; other gens are at // portion of the table. The perm gen is index 0. The young gen is index 1,
// their level plus 1. They youngest gen is in the table, but will // but will always have the value "clean_card". The old gen is index 2.
// always have the value "clean_card".)
jbyte* _last_cur_val_in_gen; jbyte* _last_cur_val_in_gen;
jbyte _cur_youngergen_card_val; jbyte _cur_youngergen_card_val;

View File

@ -746,11 +746,11 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
return result; // Could be null if we are out of space. return result; // Could be null if we are out of space.
} else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
// Do an incremental collection. // Do an incremental collection.
gch->do_collection(false /* full */, gch->do_collection(false, // full
false /* clear_all_soft_refs */, false, // clear_all_soft_refs
size /* size */, size, // size
is_tlab /* is_tlab */, is_tlab, // is_tlab
number_of_generations() - 1 /* max_level */); GenCollectedHeap::OldGen); // max_generation
} else { } else {
if (Verbose && PrintGCDetails) { if (Verbose && PrintGCDetails) {
gclog_or_tty->print(" :: Trying full because partial may fail :: "); gclog_or_tty->print(" :: Trying full because partial may fail :: ");
@ -759,11 +759,11 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
// for the original code and why this has been simplified // for the original code and why this has been simplified
// with from-space allocation criteria modified and // with from-space allocation criteria modified and
// such allocation moved out of the safepoint path. // such allocation moved out of the safepoint path.
gch->do_collection(true /* full */, gch->do_collection(true, // full
false /* clear_all_soft_refs */, false, // clear_all_soft_refs
size /* size */, size, // size
is_tlab /* is_tlab */, is_tlab, // is_tlab
number_of_generations() - 1 /* max_level */); GenCollectedHeap::OldGen); // max_generation
} }
result = gch->attempt_allocation(size, is_tlab, false /*first_only*/); result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
@ -787,11 +787,11 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
{ {
UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
gch->do_collection(true /* full */, gch->do_collection(true, // full
true /* clear_all_soft_refs */, true, // clear_all_soft_refs
size /* size */, size, // size
is_tlab /* is_tlab */, is_tlab, // is_tlab
number_of_generations() - 1 /* max_level */); GenCollectedHeap::OldGen); // max_generation
} }
result = gch->attempt_allocation(size, is_tlab, false /* first_only */); result = gch->attempt_allocation(size, is_tlab, false /* first_only */);

View File

@ -261,8 +261,6 @@ class GenCollectorPolicy : public CollectorPolicy {
size_t initial_old_size() { return _initial_old_size; } size_t initial_old_size() { return _initial_old_size; }
size_t max_old_size() { return _max_old_size; } size_t max_old_size() { return _max_old_size; }
int number_of_generations() { return 2; }
GenerationSpec* young_gen_spec() const { GenerationSpec* young_gen_spec() const {
assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized"); assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized");
return _young_gen_spec; return _young_gen_spec;

View File

@ -127,11 +127,11 @@ jint GenCollectedHeap::initialize() {
set_barrier_set(rem_set()->bs()); set_barrier_set(rem_set()->bs());
ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false); ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
_young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set()); _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size()); heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false); ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
_old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set()); _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
clear_incremental_collection_failed(); clear_incremental_collection_failed();
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
@ -202,12 +202,8 @@ size_t GenCollectedHeap::used() const {
return _young_gen->used() + _old_gen->used(); return _young_gen->used() + _old_gen->used();
} }
// Save the "used_region" for generations level and lower. void GenCollectedHeap::save_used_regions() {
void GenCollectedHeap::save_used_regions(int level) { _old_gen->save_used_region();
assert(level == 0 || level == 1, "Illegal level parameter");
if (level == 1) {
_old_gen->save_used_region();
}
_young_gen->save_used_region(); _young_gen->save_used_region();
} }
@ -337,8 +333,16 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
record_gen_tops_before_GC(); record_gen_tops_before_GC();
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, // I didn't want to change the logging when removing the level concept,
gen->level(), // but I guess this logging could say young/old or something instead of 0/1.
uint level;
if (heap()->is_young_gen(gen)) {
level = 0;
} else {
level = 1;
}
gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
level,
gen->stat_record()->invocations, gen->stat_record()->invocations,
size * HeapWordSize); size * HeapWordSize);
} }
@ -399,7 +403,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
gen->stat_record()->accumulated_time.stop(); gen->stat_record()->accumulated_time.stop();
update_gc_stats(gen->level(), full); update_gc_stats(gen, full);
if (run_verification && VerifyAfterGC) { if (run_verification && VerifyAfterGC) {
HandleMark hm; // Discard invalid handles created during verification HandleMark hm; // Discard invalid handles created during verification
@ -412,11 +416,11 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
} }
} }
void GenCollectedHeap::do_collection(bool full, void GenCollectedHeap::do_collection(bool full,
bool clear_all_soft_refs, bool clear_all_soft_refs,
size_t size, size_t size,
bool is_tlab, bool is_tlab,
int max_level) { GenerationType max_generation) {
ResourceMark rm; ResourceMark rm;
DEBUG_ONLY(Thread* my_thread = Thread::current();) DEBUG_ONLY(Thread* my_thread = Thread::current();)
@ -444,7 +448,7 @@ void GenCollectedHeap::do_collection(bool full,
{ {
FlagSetting fl(_is_gc_active, true); FlagSetting fl(_is_gc_active, true);
bool complete = full && (max_level == 1 /* old */); bool complete = full && (max_generation == OldGen);
const char* gc_cause_prefix = complete ? "Full GC" : "GC"; const char* gc_cause_prefix = complete ? "Full GC" : "GC";
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
@ -458,9 +462,8 @@ void GenCollectedHeap::do_collection(bool full,
bool run_verification = total_collections() >= VerifyGCStartAt; bool run_verification = total_collections() >= VerifyGCStartAt;
bool prepared_for_verification = false; bool prepared_for_verification = false;
int max_level_collected = 0; bool collected_old = false;
bool old_collects_young = (max_level == 1) && bool old_collects_young = complete &&
full &&
_old_gen->full_collects_younger_generations(); _old_gen->full_collects_younger_generations();
if (!old_collects_young && if (!old_collects_young &&
_young_gen->should_collect(full, size, is_tlab)) { _young_gen->should_collect(full, size, is_tlab)) {
@ -487,7 +490,7 @@ void GenCollectedHeap::do_collection(bool full,
bool must_restore_marks_for_biased_locking = false; bool must_restore_marks_for_biased_locking = false;
if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) { if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
if (!complete) { if (!complete) {
// The full_collections increment was missed above. // The full_collections increment was missed above.
increment_total_full_collections(); increment_total_full_collections();
@ -510,13 +513,13 @@ void GenCollectedHeap::do_collection(bool full,
true); true);
must_restore_marks_for_biased_locking = true; must_restore_marks_for_biased_locking = true;
max_level_collected = 1; collected_old = true;
} }
// Update "complete" boolean wrt what actually transpired -- // Update "complete" boolean wrt what actually transpired --
// for instance, a promotion failure could have led to // for instance, a promotion failure could have led to
// a whole heap collection. // a whole heap collection.
complete = complete || (max_level_collected == 1 /* old */); complete = complete || collected_old;
if (complete) { // We did a "major" collection if (complete) { // We did a "major" collection
// FIXME: See comment at pre_full_gc_dump call // FIXME: See comment at pre_full_gc_dump call
@ -533,7 +536,7 @@ void GenCollectedHeap::do_collection(bool full,
} }
// Adjust generation sizes. // Adjust generation sizes.
if (max_level_collected == 1 /* old */) { if (collected_old) {
_old_gen->compute_new_size(); _old_gen->compute_new_size();
} }
_young_gen->compute_new_size(); _young_gen->compute_new_size();
@ -661,11 +664,10 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations)); DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
} }
} }
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope, void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
int level, GenerationType type,
bool younger_gens_as_roots, bool younger_gens_as_roots,
ScanningOption so, ScanningOption so,
bool only_strong_roots, bool only_strong_roots,
@ -675,7 +677,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots; const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
bool is_moving_collection = false; bool is_moving_collection = false;
if (level == 0 || is_adjust_phase) { if (type == YoungGen || is_adjust_phase) {
// young collections are always moving // young collections are always moving
is_moving_collection = true; is_moving_collection = true;
} }
@ -691,7 +693,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
if (younger_gens_as_roots) { if (younger_gens_as_roots) {
if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
if (level == 1) { if (type == OldGen) {
not_older_gens->set_generation(_young_gen); not_older_gens->set_generation(_young_gen);
_young_gen->oop_iterate(not_older_gens); _young_gen->oop_iterate(not_older_gens);
} }
@ -699,8 +701,8 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
} }
} }
// When collection is parallel, all threads get to cooperate to do // When collection is parallel, all threads get to cooperate to do
// older-gen scanning. // old generation scanning.
if (level == 0) { if (type == YoungGen) {
older_gens->set_generation(_old_gen); older_gens->set_generation(_old_gen);
rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads()); rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
older_gens->reset_generation(); older_gens->reset_generation();
@ -724,10 +726,10 @@ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
void GenCollectedHeap:: \ void GenCollectedHeap:: \
oop_since_save_marks_iterate(int level, \ oop_since_save_marks_iterate(GenerationType gen, \
OopClosureType* cur, \ OopClosureType* cur, \
OopClosureType* older) { \ OopClosureType* older) { \
if (level == 0) { \ if (gen == YoungGen) { \
_young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \ _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
_old_gen->oop_since_save_marks_iterate##nv_suffix(older); \ _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
} else { \ } else { \
@ -739,8 +741,8 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
bool GenCollectedHeap::no_allocs_since_save_marks(int level) { bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
if (level == 0 && !_young_gen->no_allocs_since_save_marks()) { if (include_young && !_young_gen->no_allocs_since_save_marks()) {
return false; return false;
} }
return _old_gen->no_allocs_since_save_marks(); return _old_gen->no_allocs_since_save_marks();
@ -770,47 +772,47 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) { } else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API // minor collection for WhiteBox API
collect(cause, 0 /* young */); collect(cause, YoungGen);
} else { } else {
#ifdef ASSERT #ifdef ASSERT
if (cause == GCCause::_scavenge_alot) { if (cause == GCCause::_scavenge_alot) {
// minor collection only // minor collection only
collect(cause, 0 /* young */); collect(cause, YoungGen);
} else { } else {
// Stop-the-world full collection // Stop-the-world full collection
collect(cause, 1 /* old */); collect(cause, OldGen);
} }
#else #else
// Stop-the-world full collection // Stop-the-world full collection
collect(cause, 1 /* old */); collect(cause, OldGen);
#endif #endif
} }
} }
void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
// The caller doesn't have the Heap_lock // The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
MutexLocker ml(Heap_lock); MutexLocker ml(Heap_lock);
collect_locked(cause, max_level); collect_locked(cause, max_generation);
} }
void GenCollectedHeap::collect_locked(GCCause::Cause cause) { void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
// The caller has the Heap_lock // The caller has the Heap_lock
assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
collect_locked(cause, 1 /* old */); collect_locked(cause, OldGen);
} }
// this is the private collection interface // this is the private collection interface
// The Heap_lock is expected to be held on entry. // The Heap_lock is expected to be held on entry.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
// Read the GC count while holding the Heap_lock // Read the GC count while holding the Heap_lock
unsigned int gc_count_before = total_collections(); unsigned int gc_count_before = total_collections();
unsigned int full_gc_count_before = total_full_collections(); unsigned int full_gc_count_before = total_full_collections();
{ {
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_GenCollectFull op(gc_count_before, full_gc_count_before, VM_GenCollectFull op(gc_count_before, full_gc_count_before,
cause, max_level); cause, max_generation);
VMThread::execute(&op); VMThread::execute(&op);
} }
} }
@ -853,39 +855,39 @@ void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) { void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
do_full_collection(clear_all_soft_refs, 1 /* old */); do_full_collection(clear_all_soft_refs, OldGen);
} }
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
int max_level) { GenerationType last_generation) {
int local_max_level; GenerationType local_last_generation;
if (!incremental_collection_will_fail(false /* don't consult_young */) && if (!incremental_collection_will_fail(false /* don't consult_young */) &&
gc_cause() == GCCause::_gc_locker) { gc_cause() == GCCause::_gc_locker) {
local_max_level = 0; local_last_generation = YoungGen;
} else { } else {
local_max_level = max_level; local_last_generation = last_generation;
} }
do_collection(true /* full */, do_collection(true, // full
clear_all_soft_refs /* clear_all_soft_refs */, clear_all_soft_refs, // clear_all_soft_refs
0 /* size */, 0, // size
false /* is_tlab */, false, // is_tlab
local_max_level /* max_level */); local_last_generation); // last_generation
// Hack XXX FIX ME !!! // Hack XXX FIX ME !!!
// A scavenge may not have been attempted, or may have // A scavenge may not have been attempted, or may have
// been attempted and failed, because the old gen was too full // been attempted and failed, because the old gen was too full
if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker && if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
incremental_collection_will_fail(false /* don't consult_young */)) { incremental_collection_will_fail(false /* don't consult_young */)) {
if (PrintGCDetails) { if (PrintGCDetails) {
gclog_or_tty->print_cr("GC locker: Trying a full collection " gclog_or_tty->print_cr("GC locker: Trying a full collection "
"because scavenge failed"); "because scavenge failed");
} }
// This time allow the old gen to be collected as well // This time allow the old gen to be collected as well
do_collection(true /* full */, do_collection(true, // full
clear_all_soft_refs /* clear_all_soft_refs */, clear_all_soft_refs, // clear_all_soft_refs
0 /* size */, 0, // size
false /* is_tlab */, false, // is_tlab
1 /* old */ /* max_level */); OldGen); // last_generation
} }
} }
@ -1108,12 +1110,8 @@ void GenCollectedHeap::prepare_for_compaction() {
_young_gen->prepare_for_compaction(&cp); _young_gen->prepare_for_compaction(&cp);
} }
GCStats* GenCollectedHeap::gc_stats(int level) const { GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
if (level == 0) { return gen->gc_stats();
return _young_gen->gc_stats();
} else {
return _old_gen->gc_stats();
}
} }
void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) { void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
@ -1283,7 +1281,7 @@ void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen, oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
oop obj, oop obj,
size_t obj_size) { size_t obj_size) {
guarantee(old_gen->level() == 1, "We only get here with an old generation"); guarantee(old_gen == _old_gen, "We only get here with an old generation");
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL; HeapWord* result = NULL;

View File

@ -55,6 +55,11 @@ class GenCollectedHeap : public CollectedHeap {
public: public:
friend class VM_PopulateDumpSharedSpace; friend class VM_PopulateDumpSharedSpace;
enum GenerationType {
YoungGen,
OldGen
};
private: private:
Generation* _young_gen; Generation* _young_gen;
Generation* _old_gen; Generation* _old_gen;
@ -95,11 +100,11 @@ protected:
// Helper function for two callbacks below. // Helper function for two callbacks below.
// Considers collection of the first max_level+1 generations. // Considers collection of the first max_level+1 generations.
void do_collection(bool full, void do_collection(bool full,
bool clear_all_soft_refs, bool clear_all_soft_refs,
size_t size, size_t size,
bool is_tlab, bool is_tlab,
int max_level); GenerationType max_generation);
// Callback from VM_GenCollectForAllocation operation. // Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an // This function does everything necessary/possible to satisfy an
@ -110,7 +115,7 @@ protected:
// Callback from VM_GenCollectFull operation. // Callback from VM_GenCollectFull operation.
// Perform a full collection of the first max_level+1 generations. // Perform a full collection of the first max_level+1 generations.
virtual void do_full_collection(bool clear_all_soft_refs); virtual void do_full_collection(bool clear_all_soft_refs);
void do_full_collection(bool clear_all_soft_refs, int max_level); void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);
// Does the "cause" of GC indicate that // Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs? // we absolutely __must__ clear soft refs?
@ -121,7 +126,7 @@ public:
FlexibleWorkGang* workers() const { return _workers; } FlexibleWorkGang* workers() const { return _workers; }
GCStats* gc_stats(int level) const; GCStats* gc_stats(Generation* generation) const;
// Returns JNI_OK on success // Returns JNI_OK on success
virtual jint initialize(); virtual jint initialize();
@ -142,6 +147,9 @@ public:
Generation* young_gen() const { return _young_gen; } Generation* young_gen() const { return _young_gen; }
Generation* old_gen() const { return _old_gen; } Generation* old_gen() const { return _old_gen; }
bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
// The generational collector policy. // The generational collector policy.
GenCollectorPolicy* gen_policy() const { return _gen_policy; } GenCollectorPolicy* gen_policy() const { return _gen_policy; }
@ -160,8 +168,8 @@ public:
size_t capacity() const; size_t capacity() const;
size_t used() const; size_t used() const;
// Save the "used_region" for generations level and lower. // Save the "used_region" for both generations.
void save_used_regions(int level); void save_used_regions();
size_t max_capacity() const; size_t max_capacity() const;
@ -182,9 +190,9 @@ public:
// The same as above but assume that the caller holds the Heap_lock. // The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause); void collect_locked(GCCause::Cause cause);
// Perform a full collection of the first max_level+1 generations. // Perform a full collection of generations up to and including max_generation.
// Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause, int max_level); void collect(GCCause::Cause cause, GenerationType max_generation);
// Returns "TRUE" iff "p" points into the committed areas of the heap. // Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in(), is_in_closed_subset() and is_in_youngest() may // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
@ -314,10 +322,8 @@ public:
} }
// Update the gc statistics for each generation. // Update the gc statistics for each generation.
// "level" is the level of the latest collection. void update_gc_stats(Generation* current_generation, bool full) {
void update_gc_stats(int current_level, bool full) { _old_gen->update_gc_stats(current_generation, full);
_young_gen->update_gc_stats(current_level, full);
_old_gen->update_gc_stats(current_level, full);
} }
bool no_gc_in_progress() { return !is_gc_active(); } bool no_gc_in_progress() { return !is_gc_active(); }
@ -365,8 +371,8 @@ public:
static GenCollectedHeap* heap(); static GenCollectedHeap* heap();
// Invoke the "do_oop" method of one of the closures "not_older_gens" // Invoke the "do_oop" method of one of the closures "not_older_gens"
// or "older_gens" on root locations for the generation at // or "older_gens" on root locations for the generations depending on
// "level". (The "older_gens" closure is used for scanning references // the type. (The "older_gens" closure is used for scanning references
// from older generations; "not_older_gens" is used everywhere else.) // from older generations; "not_older_gens" is used everywhere else.)
// If "younger_gens_as_roots" is false, younger generations are // If "younger_gens_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to // not scanned as roots; in this case, the caller must be arranging to
@ -396,7 +402,7 @@ public:
static const bool StrongRootsOnly = true; static const bool StrongRootsOnly = true;
void gen_process_roots(StrongRootsScope* scope, void gen_process_roots(StrongRootsScope* scope,
int level, GenerationType type,
bool younger_gens_as_roots, bool younger_gens_as_roots,
ScanningOption so, ScanningOption so,
bool only_strong_roots, bool only_strong_roots,
@ -420,7 +426,7 @@ public:
// applied to references in the generation at "level", and the "older" // applied to references in the generation at "level", and the "older"
// closure to older generations. // closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
void oop_since_save_marks_iterate(int level, \ void oop_since_save_marks_iterate(GenerationType start_gen, \
OopClosureType* cur, \ OopClosureType* cur, \
OopClosureType* older); OopClosureType* older);
@ -428,21 +434,17 @@ public:
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
// Returns "true" iff no allocations have occurred in any generation at // Returns "true" iff no allocations have occurred since the last
// "level" or above since the last
// call to "save_marks". // call to "save_marks".
bool no_allocs_since_save_marks(int level); bool no_allocs_since_save_marks(bool include_young);
// Returns true if an incremental collection is likely to fail. // Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so; // We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental // otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet. // collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) { bool incremental_collection_will_fail(bool consult_young) {
// Assumes a 2-generation system; the first disjunct remembers if an // The first disjunct remembers if an incremental collection failed, even
// incremental collection failed, even when we thought (second disjunct) // when we thought (second disjunct) that it would not.
// that it would not.
assert(heap()->collector_policy()->is_generation_policy(),
"the following definition may not be suitable for an n(>2)-generation system");
return incremental_collection_failed() || return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe()); (consult_young && !_young_gen->collection_attempt_is_safe());
} }
@ -482,10 +484,10 @@ private:
// iterating over spaces. // iterating over spaces.
void prepare_for_compaction(); void prepare_for_compaction();
// Perform a full collection of the first max_level+1 generations. // Perform a full collection of the generations up to and including max_generation.
// This is the low level interface used by the public versions of // This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry. // collect() and collect_locked(). Caller holds the Heap_lock on entry.
void collect_locked(GCCause::Cause cause, int max_level); void collect_locked(GCCause::Cause cause, GenerationType max_generation);
// Returns success or failure. // Returns success or failure.
bool create_cms_collector(); bool create_cms_collector();

View File

@ -42,8 +42,7 @@
#include "utilities/copy.hpp" #include "utilities/copy.hpp"
#include "utilities/events.hpp" #include "utilities/events.hpp"
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : Generation::Generation(ReservedSpace rs, size_t initial_size) :
_level(level),
_ref_processor(NULL) { _ref_processor(NULL) {
if (!_virtual_space.initialize(rs, initial_size)) { if (!_virtual_space.initialize(rs, initial_size)) {
vm_exit_during_initialization("Could not reserve enough space for " vm_exit_during_initialization("Could not reserve enough space for "
@ -61,8 +60,10 @@ Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
GenerationSpec* Generation::spec() { GenerationSpec* Generation::spec() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(level() == 0 || level() == 1, "Bad gen level"); if (gch->is_young_gen(this)) {
return level() == 0 ? gch->gen_policy()->young_gen_spec() : gch->gen_policy()->old_gen_spec(); return gch->gen_policy()->young_gen_spec();
}
return gch->gen_policy()->old_gen_spec();
} }
size_t Generation::max_capacity() const { size_t Generation::max_capacity() const {
@ -111,9 +112,17 @@ void Generation::print_summary_info() { print_summary_info_on(tty); }
void Generation::print_summary_info_on(outputStream* st) { void Generation::print_summary_info_on(outputStream* st) {
StatRecord* sr = stat_record(); StatRecord* sr = stat_record();
double time = sr->accumulated_time.seconds(); double time = sr->accumulated_time.seconds();
// I didn't want to change the logging when removing the level concept,
// but I guess this logging could say young/old or something instead of 0/1.
uint level;
if (GenCollectedHeap::heap()->is_young_gen(this)) {
level = 0;
} else {
level = 1;
}
st->print_cr("[Accumulated GC generation %d time %3.7f secs, " st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
"%d GC's, avg GC time %3.7f]", "%u GC's, avg GC time %3.7f]",
level(), time, sr->invocations, level, time, sr->invocations,
sr->invocations > 0 ? time / sr->invocations : 0.0); sr->invocations > 0 ? time / sr->invocations : 0.0);
} }
@ -149,25 +158,14 @@ bool Generation::is_in(const void* p) const {
return blk.sp != NULL; return blk.sp != NULL;
} }
Generation* Generation::next_gen() const {
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (level() == 0) {
return gch->old_gen();
} else {
return NULL;
}
}
size_t Generation::max_contiguous_available() const { size_t Generation::max_contiguous_available() const {
// The largest number of contiguous free words in this or any higher generation. // The largest number of contiguous free words in this or any higher generation.
size_t max = 0; size_t avail = contiguous_available();
for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { size_t old_avail = 0;
size_t avail = gen->contiguous_available(); if (GenCollectedHeap::heap()->is_young_gen(this)) {
if (avail > max) { old_avail = GenCollectedHeap::heap()->old_gen()->contiguous_available();
max = avail;
}
} }
return max; return MAX2(avail, old_avail);
} }
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {

View File

@ -98,9 +98,6 @@ class Generation: public CHeapObj<mtGC> {
// Memory area reserved for generation // Memory area reserved for generation
VirtualSpace _virtual_space; VirtualSpace _virtual_space;
// Level in the generation hierarchy.
int _level;
// ("Weak") Reference processing support // ("Weak") Reference processing support
ReferenceProcessor* _ref_processor; ReferenceProcessor* _ref_processor;
@ -110,12 +107,8 @@ class Generation: public CHeapObj<mtGC> {
// Statistics for garbage collection // Statistics for garbage collection
GCStats* _gc_stats; GCStats* _gc_stats;
// Returns the next generation in the configuration, or else NULL if this
// is the highest generation.
Generation* next_gen() const;
// Initialize the generation. // Initialize the generation.
Generation(ReservedSpace rs, size_t initial_byte_size, int level); Generation(ReservedSpace rs, size_t initial_byte_size);
// Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
// "sp" that point into younger generations. // "sp" that point into younger generations.
@ -409,15 +402,14 @@ class Generation: public CHeapObj<mtGC> {
_time_of_last_gc = now; _time_of_last_gc = now;
} }
// Generations may keep statistics about collection. This // Generations may keep statistics about collection. This method
// method updates those statistics. current_level is // updates those statistics. current_generation is the generation
// the level of the collection that has most recently // that was most recently collected. This allows the generation to
// occurred. This allows the generation to decide what // decide what statistics are valid to collect. For example, the
// statistics are valid to collect. For example, the // generation can decide to gather the amount of promoted data if
// generation can decide to gather the amount of promoted data // the collection of the younger generations has completed.
// if the collection of the younger generations has completed.
GCStats* gc_stats() const { return _gc_stats; } GCStats* gc_stats() const { return _gc_stats; }
virtual void update_gc_stats(int current_level, bool full) {} virtual void update_gc_stats(Generation* current_generation, bool full) {}
// Mark sweep support phase2 // Mark sweep support phase2
virtual void prepare_for_compaction(CompactPoint* cp); virtual void prepare_for_compaction(CompactPoint* cp);
@ -502,8 +494,6 @@ class Generation: public CHeapObj<mtGC> {
virtual const char* name() const = 0; virtual const char* name() const = 0;
virtual const char* short_name() const = 0; virtual const char* short_name() const = 0;
int level() const { return _level; }
// Reference Processing accessor // Reference Processing accessor
ReferenceProcessor* const ref_processor() { return _ref_processor; } ReferenceProcessor* const ref_processor() { return _ref_processor; }

View File

@ -36,18 +36,17 @@
#include "gc/cms/parNewGeneration.hpp" #include "gc/cms/parNewGeneration.hpp"
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
Generation* GenerationSpec::init(ReservedSpace rs, int level, Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {
GenRemSet* remset) {
switch (name()) { switch (name()) {
case Generation::DefNew: case Generation::DefNew:
return new DefNewGeneration(rs, init_size(), level); return new DefNewGeneration(rs, init_size());
case Generation::MarkSweepCompact: case Generation::MarkSweepCompact:
return new TenuredGeneration(rs, init_size(), level, remset); return new TenuredGeneration(rs, init_size(), remset);
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
case Generation::ParNew: case Generation::ParNew:
return new ParNewGeneration(rs, init_size(), level); return new ParNewGeneration(rs, init_size());
case Generation::ConcurrentMarkSweep: { case Generation::ConcurrentMarkSweep: {
assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set"); assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
@ -61,7 +60,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
ConcurrentMarkSweepGeneration* g = NULL; ConcurrentMarkSweepGeneration* g = NULL;
g = new ConcurrentMarkSweepGeneration(rs, g = new ConcurrentMarkSweepGeneration(rs,
init_size(), level, ctrs, UseCMSAdaptiveFreeLists, init_size(), ctrs, UseCMSAdaptiveFreeLists,
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice); (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
g->initialize_performance_counters(); g->initialize_performance_counters();

View File

@ -45,7 +45,7 @@ public:
_max_size(align_size_up(max_size, alignment)) _max_size(align_size_up(max_size, alignment))
{ } { }
Generation* init(ReservedSpace rs, int level, GenRemSet* remset); Generation* init(ReservedSpace rs, GenRemSet* remset);
// Accessors // Accessors
Generation::Name name() const { return _name; } Generation::Name name() const { return _name; }

View File

@ -184,7 +184,7 @@ void VM_GenCollectFull::doit() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, _gc_cause); GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
} }
// Returns true iff concurrent GCs unloads metadata. // Returns true iff concurrent GCs unloads metadata.

View File

@ -26,6 +26,7 @@
#define SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP #define SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP
#include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/heapInspection.hpp" #include "memory/heapInspection.hpp"
#include "prims/jvmtiExport.hpp" #include "prims/jvmtiExport.hpp"
#include "runtime/handles.hpp" #include "runtime/handles.hpp"
@ -193,14 +194,14 @@ class VM_GenCollectForAllocation : public VM_CollectForAllocation {
// GenCollectedHeap heap. // GenCollectedHeap heap.
class VM_GenCollectFull: public VM_GC_Operation { class VM_GenCollectFull: public VM_GC_Operation {
private: private:
int _max_level; GenCollectedHeap::GenerationType _max_generation;
public: public:
VM_GenCollectFull(uint gc_count_before, VM_GenCollectFull(uint gc_count_before,
uint full_gc_count_before, uint full_gc_count_before,
GCCause::Cause gc_cause, GCCause::Cause gc_cause,
int max_level) GenCollectedHeap::GenerationType max_generation)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_max_level(max_level) { } _max_generation(max_generation) { }
~VM_GenCollectFull() {} ~VM_GenCollectFull() {}
virtual VMOp_Type type() const { return VMOp_GenCollectFull; } virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
virtual void doit(); virtual void doit();

View File

@ -381,6 +381,9 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
if (!constants()->is_shared()) { if (!constants()->is_shared()) {
MetadataFactory::free_metadata(loader_data, constants()); MetadataFactory::free_metadata(loader_data, constants());
} }
// Delete any cached resolution errors for the constant pool
SystemDictionary::delete_resolution_error(constants());
set_constants(NULL); set_constants(NULL);
} }

View File

@ -4089,9 +4089,6 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
mnt->adjust_method_entries(the_class(), &trace_name_printed); mnt->adjust_method_entries(the_class(), &trace_name_printed);
} }
// Fix Resolution Error table also to remove old constant pools
SystemDictionary::delete_resolution_error(old_constants);
if (the_class->oop_map_cache() != NULL) { if (the_class->oop_map_cache() != NULL) {
// Flush references to any obsolete methods from the oop map cache // Flush references to any obsolete methods from the oop map cache
// so that obsolete methods are not pinned. // so that obsolete methods are not pinned.

View File

@ -1384,6 +1384,12 @@ void Arguments::set_cms_and_parnew_gc_flags() {
if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) { if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight); CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
} }
if (!ClassUnloading) {
FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
}
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk", tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K)); (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));

View File

@ -832,7 +832,7 @@ void os::print_environment_variables(outputStream* st, const char** env_list) {
} }
} }
void os::print_cpu_info(outputStream* st) { void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// cpu // cpu
st->print("CPU:"); st->print("CPU:");
st->print("total %d", os::processor_count()); st->print("total %d", os::processor_count());
@ -840,7 +840,7 @@ void os::print_cpu_info(outputStream* st) {
// st->print("(active %d)", os::active_processor_count()); // st->print("(active %d)", os::active_processor_count());
st->print(" %s", VM_Version::cpu_features()); st->print(" %s", VM_Version::cpu_features());
st->cr(); st->cr();
pd_print_cpu_info(st); pd_print_cpu_info(st, buf, buflen);
} }
void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) { void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {

View File

@ -588,8 +588,8 @@ class os: AllStatic {
// Output format may be different on different platforms. // Output format may be different on different platforms.
static void print_os_info(outputStream* st); static void print_os_info(outputStream* st);
static void print_os_info_brief(outputStream* st); static void print_os_info_brief(outputStream* st);
static void print_cpu_info(outputStream* st); static void print_cpu_info(outputStream* st, char* buf, size_t buflen);
static void pd_print_cpu_info(outputStream* st); static void pd_print_cpu_info(outputStream* st, char* buf, size_t buflen);
static void print_memory_info(outputStream* st); static void print_memory_info(outputStream* st);
static void print_dll_info(outputStream* st); static void print_dll_info(outputStream* st);
static void print_environment_variables(outputStream* st, const char** env_list); static void print_environment_variables(outputStream* st, const char** env_list);

View File

@ -545,7 +545,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
\ \
nonstatic_field(Generation, _reserved, MemRegion) \ nonstatic_field(Generation, _reserved, MemRegion) \
nonstatic_field(Generation, _virtual_space, VirtualSpace) \ nonstatic_field(Generation, _virtual_space, VirtualSpace) \
nonstatic_field(Generation, _level, int) \
nonstatic_field(Generation, _stat_record, Generation::StatRecord) \ nonstatic_field(Generation, _stat_record, Generation::StatRecord) \
\ \
nonstatic_field(Generation::StatRecord, invocations, int) \ nonstatic_field(Generation::StatRecord, invocations, int) \

View File

@ -280,7 +280,8 @@ void VM_Version_init() {
#ifndef PRODUCT #ifndef PRODUCT
if (PrintMiscellaneous && Verbose) { if (PrintMiscellaneous && Verbose) {
os::print_cpu_info(tty); char buf[512];
os::print_cpu_info(tty, buf, sizeof(buf));
} }
#endif #endif
} }

View File

@ -127,7 +127,6 @@ void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
assert(policy->is_generation_policy(), "Only support two generations"); assert(policy->is_generation_policy(), "Only support two generations");
GenCollectorPolicy* gen_policy = policy->as_generation_policy(); GenCollectorPolicy* gen_policy = policy->as_generation_policy();
guarantee(gen_policy->number_of_generations() == 2, "Only support two-generation heap");
if (gen_policy != NULL) { if (gen_policy != NULL) {
Generation::Name kind = gen_policy->young_gen_spec()->name(); Generation::Name kind = gen_policy->young_gen_spec()->name();
switch (kind) { switch (kind) {

View File

@ -816,7 +816,7 @@ void VMError::report(outputStream* st) {
STEP(250, "(printing CPU info)" ) STEP(250, "(printing CPU info)" )
if (_verbose) { if (_verbose) {
os::print_cpu_info(st); os::print_cpu_info(st, buf, sizeof(buf));
st->cr(); st->cr();
} }

View File

@ -0,0 +1,143 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8076110
* @summary Redefine running methods that have cached resolution errors
* @library /testlibrary
* @modules java.instrument
* java.base/jdk.internal.org.objectweb.asm
* @build RedefineClassHelper
* @run main RedefineClassHelper
* @run main/othervm -javaagent:redefineagent.jar -XX:TraceRedefineClasses=0x600 RedefineRunningMethodsWithResolutionErrors
*/
import jdk.internal.org.objectweb.asm.ClassWriter;
import jdk.internal.org.objectweb.asm.Label;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.org.objectweb.asm.Opcodes;
import java.lang.reflect.InvocationTargetException;
public class RedefineRunningMethodsWithResolutionErrors extends ClassLoader implements Opcodes {

    // The class under test; redefined while its method "m" is still on the stack.
    private static Class<?> c;

    @Override
    protected Class<?> findClass(String name) throws ClassNotFoundException {
        // Only class "C" is generated by this loader; delegate everything else.
        if (!name.equals("C")) {
            return super.findClass(name);
        }
        byte[] classBytes = loadC(false);
        return defineClass(name, classBytes, 0, classBytes.length);
    }

    /**
     * Generates the bytecode for class C. The initial version
     * (redefine == false) carries the body built by createMethodBody();
     * the redefined version only throws, because it must never be entered.
     */
    private static byte[] loadC(boolean redefine) {
        ClassWriter writer = new ClassWriter(0);
        writer.visit(52, ACC_SUPER | ACC_PUBLIC, "C", null, "java/lang/Object", null);

        MethodVisitor method = writer.visitMethod(ACC_PUBLIC | ACC_STATIC, "m", "()V", null, null);
        method.visitCode();
        // First time we run we will:
        // 1) Cache resolution errors
        // 2) Redefine the class / method
        // 3) Try to read the resolution errors that were cached
        //
        // The redefined method will never run, throw error to be sure
        if (redefine) {
            createThrowRuntimeExceptionCode(method, "The redefined method was called");
        } else {
            createMethodBody(method);
        }
        method.visitMaxs(3, 0);
        method.visitEnd();

        writer.visitEnd();
        return writer.toByteArray();
    }

    // Emits the body of C.m(): provoke a resolution error (caching it),
    // redefine this very class, then provoke the identical error again so the
    // cached entry — which references the old constant pool — is exercised.
    private static void createMethodBody(MethodVisitor method) {
        Label resolutionSucceeded = new Label();
        // Cache resolution errors
        createLoadNonExistentClassCode(method, resolutionSucceeded);
        // Redefine our own class and method
        method.visitMethodInsn(INVOKESTATIC, "RedefineRunningMethodsWithResolutionErrors", "redefine", "()V");
        // Provoke the same error again to make sure the resolution error cache works
        createLoadNonExistentClassCode(method, resolutionSucceeded);
        // Test passed
        method.visitInsn(RETURN);
        method.visitFrame(F_SAME, 0, new Object[0], 0, new Object[0]);
        method.visitLabel(resolutionSucceeded);
        createThrowRuntimeExceptionCode(method, "Loaded class that shouldn't exist (\"NonExistentClass\")");
    }

    // Emits a guarded call to a class that does not exist. The expected
    // NoClassDefFoundError is swallowed; falling through to the jump means the
    // class unexpectedly resolved, so control goes to resolutionSucceeded.
    private static void createLoadNonExistentClassCode(MethodVisitor method, Label resolutionSucceeded) {
        Label loadStart = new Label();
        Label loadEnd = new Label();
        Label loadHandler = new Label();

        method.visitTryCatchBlock(loadStart, loadEnd, loadHandler, "java/lang/NoClassDefFoundError");
        // Try to load a class that does not exist to provoke resolution errors
        method.visitLabel(loadStart);
        method.visitMethodInsn(INVOKESTATIC, "NonExistentClass", "nonExistentMethod", "()V");
        method.visitLabel(loadEnd);
        // No NoClassDefFoundError means NonExistentClass existed, which shouldn't happen
        method.visitJumpInsn(GOTO, resolutionSucceeded);
        method.visitFrame(F_SAME1, 0, new Object[0], 1, new Object[] { "java/lang/NoClassDefFoundError" });
        method.visitLabel(loadHandler);
        // Ignore the expected NoClassDefFoundError
        method.visitInsn(POP);
    }

    // Emits: throw new RuntimeException(msg);
    private static void createThrowRuntimeExceptionCode(MethodVisitor method, String msg) {
        method.visitTypeInsn(NEW, "java/lang/RuntimeException");
        method.visitInsn(DUP);
        method.visitLdcInsn(msg);
        method.visitMethodInsn(INVOKESPECIAL, "java/lang/RuntimeException", "<init>", "(Ljava/lang/String;)V");
        method.visitInsn(ATHROW);
    }

    // Called back from inside C.m(); swaps in the throwing version of C while
    // the original m is still executing.
    public static void redefine() throws Exception {
        RedefineClassHelper.redefineClass(c, loadC(true));
    }

    public static void main(String[] args) throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
        // Load C through our generating loader, then run C.m(), which drives
        // the cache / redefine / re-check sequence end to end.
        c = Class.forName("C", true, new RedefineRunningMethodsWithResolutionErrors());
        c.getMethod("m").invoke(null);
    }
}