commit e1ae5e1cb0
Author: Antonios Printezis
Date:   2010-04-26 18:01:55 -04:00

41 changed files with 1662 additions and 1535 deletions

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,10 @@ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
   ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }

   void initialize_gc_policy_counters();
-#if 1
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-#endif

   // Returns true if the incremental mode is enabled.
   virtual bool has_soft_ended_eden();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1815,8 +1815,19 @@ NOT_PRODUCT(
     do_compaction_work(clear_all_soft_refs);

     // Has the GC time limit been exceeded?
-    check_gc_time_limit();
+    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+    size_t max_eden_size = young_gen->max_capacity() -
+                           young_gen->to()->capacity() -
+                           young_gen->from()->capacity();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    GCCause::Cause gc_cause = gch->gc_cause();
+    size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                           young_gen->eden()->used(),
+                                           _cmsGen->max_capacity(),
+                                           max_eden_size,
+                                           full,
+                                           gc_cause,
+                                           gch->collector_policy());
   } else {
     do_mark_sweep_work(clear_all_soft_refs, first_state,
                        should_start_over);
@@ -1828,55 +1839,6 @@ NOT_PRODUCT(
   return;
 }

-void CMSCollector::check_gc_time_limit() {
-  // Ignore explicit GC's. Exiting here does not set the flag and
-  // does not reset the count. Updating of the averages for system
-  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
-  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
-  if (GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    return;
-  }
-
-  // Calculate the fraction of the CMS generation was freed during
-  // the last collection.
-  // Only consider the STW compacting cost for now.
-  //
-  // Note that the gc time limit test only works for the collections
-  // of the young gen + tenured gen and not for collections of the
-  // permanent gen. That is because the calculation of the space
-  // freed by the collection is the free space in the young gen +
-  // tenured gen.
-
-  double fraction_free =
-    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
-  if ((100.0 * size_policy()->compacting_gc_cost()) >
-         ((double) GCTimeLimit) &&
-        ((fraction_free * 100) < GCHeapFreeLimit)) {
-    size_policy()->inc_gc_time_limit_count();
-    if (UseGCOverheadLimit &&
-        (size_policy()->gc_time_limit_count() >
-         AdaptiveSizePolicyGCTimeLimitThreshold)) {
-      size_policy()->set_gc_time_limit_exceeded(true);
-      // Avoid consecutive OOM due to the gc time limit by resetting
-      // the counter.
-      size_policy()->reset_gc_time_limit_count();
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    } else {
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC would exceed overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    }
-  } else {
-    size_policy()->reset_gc_time_limit_count();
-  }
-}
-
 // Resize the perm generation and the tenured generation
 // after obtaining the free list locks for the
 // two generations.
@@ -6182,6 +6144,11 @@ void CMSCollector::reset(bool asynch) {
       }
       curAddr = chunk.end();
     }
+    // A successful mostly concurrent collection has been done.
+    // Because only the full (i.e., concurrent mode failure) collections
+    // are being measured for gc overhead limits, clean the "near" flag
+    // and count.
+    sp->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
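The decision that the removed check_gc_time_limit made inline is now delegated to the size policy's check_gc_overhead_limit. The underlying predicate, as the removed code spells it out, can be restated as a self-contained sketch (an illustration, not the HotSpot function; GCTimeLimit and GCHeapFreeLimit are the percentage flags, with defaults 98 and 2):

    // Sketch: the GC-overhead-limit predicate, extracted for illustration.
    // gc_cost: recent (compacting) GC cost as a fraction of elapsed time.
    // free_fraction: old-gen free space over old-gen max capacity.
    bool near_gc_overhead_limit(double gc_cost, double free_fraction,
                                double gc_time_limit_pct,       // e.g. 98.0
                                double gc_heap_free_limit_pct)  // e.g. 2.0
    {
      return (100.0 * gc_cost) > gc_time_limit_pct &&
             (100.0 * free_fraction) < gc_heap_free_limit_pct;
    }

Only after this held for more than AdaptiveSizePolicyGCTimeLimitThreshold consecutive collections (with UseGCOverheadLimit on) did the old code raise the limit-exceeded flag; the reset_gc_overhead_limit_count() call added in the second hunk clears that streak after a successful mostly concurrent collection.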

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -570,10 +570,6 @@ class CMSCollector: public CHeapObj {
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

-  // Check whether the gc time limit has been
-  // exceeded and set the size policy flag
-  // appropriately.
-  void check_gc_time_limit();
-
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,9 +69,9 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    g1h->young_list_rs_length_sampling_init();
-    while (g1h->young_list_rs_length_sampling_more()) {
-      g1h->young_list_rs_length_sampling_next();
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
       ++regions_visited;

       // we try to yield every time we visit 10 regions
@@ -162,6 +162,7 @@ void ConcurrentG1RefineThread::run() {
   if (_worker_id >= cg1r()->worker_thread_num()) {
     run_young_rs_sampling();
     terminate();
+    return;
   }

   _vtime_start = os::elapsedVTime();
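The refine thread now drives the sampling through the YoungList directly, using an init / more / next triple that forms a simple external iterator over the young regions, summing each region's remembered-set size. A self-contained sketch of that protocol (simplified stand-in types, not the HotSpot classes):

    #include <cstddef>

    struct Region { size_t rs_length; Region* next; };

    class SamplingList {
      Region* _head = nullptr;
      Region* _curr = nullptr;
      size_t  _sampled = 0;        // running total for the current pass
      size_t  _last_sampled = 0;   // published when a pass completes
    public:
      void rs_length_sampling_init() { _sampled = 0; _curr = _head; }
      bool rs_length_sampling_more() const { return _curr != nullptr; }
      void rs_length_sampling_next() {
        _sampled += _curr->rs_length;          // accumulate one region
        _curr = _curr->next;                   // advance the cursor
        if (_curr == nullptr) _last_sampled = _sampled;  // pass finished
      }
      size_t last_sampled() const { return _last_sampled; }
    };

The real loop additionally yields every ten regions (as the context line above shows), so the sampling can be interleaved with the refine thread's other work.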

View File

@@ -767,7 +767,8 @@ void ConcurrentMark::checkpointRootsInitialPre() {
   _has_aborted = false;

   if (G1PrintReachableAtInitialMark) {
-    print_reachable(true, "before");
+    print_reachable("at-cycle-start",
+                    true /* use_prev_marking */, true /* all */);
   }

   // Initialise marking structures. This has to be done in a STW phase.
@@ -1979,19 +1980,21 @@ void ConcurrentMark::checkpointRootsFinalWork() {

 #ifndef PRODUCT
-class ReachablePrinterOopClosure: public OopClosure {
+class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   CMBitMapRO*      _bitmap;
   outputStream*    _out;
   bool             _use_prev_marking;
+  bool             _all;

 public:
-  ReachablePrinterOopClosure(CMBitMapRO* bitmap,
-                             outputStream* out,
-                             bool use_prev_marking) :
+  PrintReachableOopClosure(CMBitMapRO* bitmap,
+                           outputStream* out,
+                           bool use_prev_marking,
+                           bool all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }

   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -2001,9 +2004,11 @@ public:
     const char* str = NULL;
     const char* str2 = "";

-    if (!_g1h->is_in_g1_reserved(obj))
-      str = "outside G1 reserved";
-    else {
+    if (obj == NULL) {
+      str = "";
+    } else if (!_g1h->is_in_g1_reserved(obj)) {
+      str = " O";
+    } else {
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
@@ -2012,74 +2017,67 @@ public:
       } else {
         over_tams = hr->obj_allocated_since_next_marking(obj);
       }
+      bool marked = _bitmap->isMarked((HeapWord*) obj);
+
       if (over_tams) {
-        str = "over TAMS";
-        if (_bitmap->isMarked((HeapWord*) obj)) {
+        str = " >";
+        if (marked) {
           str2 = " AND MARKED";
         }
-      } else if (_bitmap->isMarked((HeapWord*) obj)) {
-        str = "marked";
+      } else if (marked) {
+        str = " M";
       } else {
-        str = "#### NOT MARKED ####";
+        str = " NOT";
      }
     }

-    _out->print_cr("  "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
+    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
                    p, (void*) obj, str, str2);
   }
 };

-class ReachablePrinterClosure: public BitMapClosure {
+class PrintReachableObjectClosure : public ObjectClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;
+  HeapRegion*   _hr;

 public:
-  ReachablePrinterClosure(CMBitMapRO* bitmap,
-                          outputStream* out,
-                          bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  bool do_bit(size_t offset) {
-    HeapWord* addr = _bitmap->offsetToHeapWord(offset);
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
-    oop(addr)->oop_iterate(&oopCl);
-    _out->print_cr("");
-
-    return true;
-  }
-};
-
-class ObjInRegionReachablePrinterClosure : public ObjectClosure {
-private:
-  CMBitMapRO* _bitmap;
-  outputStream* _out;
-  bool _use_prev_marking;
-
-public:
-  ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap,
-                                     outputStream* out,
-                                     bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
+                              outputStream* out,
+                              bool          use_prev_marking,
+                              bool          all,
+                              HeapRegion*   hr) :
+    _bitmap(bitmap), _out(out),
+    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }

   void do_object(oop o) {
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT" (over TAMS)", (void*) o);
-    o->oop_iterate(&oopCl);
-    _out->print_cr("");
+    bool over_tams;
+    if (_use_prev_marking) {
+      over_tams = _hr->obj_allocated_since_prev_marking(o);
+    } else {
+      over_tams = _hr->obj_allocated_since_next_marking(o);
+    }
+    bool marked = _bitmap->isMarked((HeapWord*) o);
+    bool print_it = _all || over_tams || marked;
+
+    if (print_it) {
+      _out->print_cr(" "PTR_FORMAT"%s",
+                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      o->oop_iterate(&oopCl);
+    }
   }
 };

-class RegionReachablePrinterClosure : public HeapRegionClosure {
+class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;

 public:
   bool doHeapRegion(HeapRegion* hr) {
@@ -2094,22 +2092,35 @@ public:
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
-    _out->print_cr("");
-
-    ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
-    hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
+    _out->cr();
+
+    HeapWord* from = b;
+    HeapWord* to   = t;
+
+    if (to > from) {
+      _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
+      _out->cr();
+      PrintReachableObjectClosure ocl(_bitmap, _out,
+                                      _use_prev_marking, _all, hr);
+      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
+      _out->cr();
+    }

     return false;
   }

-  RegionReachablePrinterClosure(CMBitMapRO* bitmap,
-                                outputStream* out,
-                                bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableRegionClosure(CMBitMapRO* bitmap,
+                              outputStream* out,
+                              bool use_prev_marking,
+                              bool all) :
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 };

-void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
-  gclog_or_tty->print_cr("== Doing reachable object dump... ");
+void ConcurrentMark::print_reachable(const char* str,
+                                     bool use_prev_marking,
+                                     bool all) {
+  gclog_or_tty->cr();
+  gclog_or_tty->print_cr("== Doing heap dump... ");

   if (G1PrintReachableBaseFile == NULL) {
     gclog_or_tty->print_cr("  #### error: no base file defined");
@@ -2144,19 +2155,14 @@ void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
   out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
   out->cr();

-  RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
+  out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
+  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
   _g1h->heap_region_iterate(&rcl);
   out->cr();

-  ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
-  out->cr();
-  bitmap->iterate(&cl);
-  out->cr();
-
   gclog_or_tty->print_cr("  done");
+  gclog_or_tty->flush();
 }

 #endif // PRODUCT

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -652,11 +652,24 @@ public:
   // we do nothing.
   void markAndGrayObjectIfNecessary(oop p);

-  // This iterates over the marking bitmap (either prev or next) and
-  // prints out all objects that are marked on the bitmap and indicates
-  // whether what they point to is also marked or not. It also iterates
-  // the objects over TAMS (either prev or next).
-  void print_reachable(bool use_prev_marking, const char* str);
+  // It iterates over the heap and for each object it comes across it
+  // will dump the contents of its reference fields, as well as
+  // liveness information for the object and its referents. The dump
+  // will be written to a file with the following name:
+  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
+  // whether the prev (use_prev_marking == true) or next
+  // (use_prev_marking == false) marking information will be used to
+  // determine the liveness of each object / referent. If all is true,
+  // all objects in the heap will be dumped, otherwise only the live
+  // ones. In the dump the following symbols / abbreviations are used:
+  //   M : an explicitly live object (its bitmap bit is set)
+  //   > : an implicitly live object (over tams)
+  //   O : an object outside the G1 heap (typically: in the perm gen)
+  //   NOT : a reference field whose referent is not live
+  //   AND MARKED : indicates that an object is both explicitly and
+  //   implicitly live (it should be one or the other, not both)
+  void print_reachable(const char* str,
+                       bool use_prev_marking, bool all) PRODUCT_RETURN;

   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
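Given that legend and the print formats in the closures above, a hypothetical fragment of a dump file (all addresses invented) might read:

    ** [0x00000000f2000000, 0x00000000f2100000] top: 0x00000000f2080000 TAMS: 0x00000000f2050000

    Objects in [0x00000000f2000000, 0x00000000f2080000]

     0x00000000f2000548 M
      0x00000000f2000550: 0x00000000f2040918 M
      0x00000000f2000558: 0x00000000e8001200 O
     0x00000000f2060120 >
      0x00000000f2060128: 0x00000000f2000548 M

Here the first object is explicitly marked; its second reference field points outside the G1 heap; the second object lies above TAMS and is therefore implicitly live.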
@@ -720,6 +733,19 @@ public:
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);

+  // Registers the maximum region-end associated with a set of
+  // regions with CM. Again this is used to determine whether any
+  // heap regions are located above the finger.
+  void register_collection_set_finger(HeapWord* max_finger) {
+    // max_finger is the highest heap region end of the regions currently
+    // contained in the collection set. If this value is larger than
+    // _min_finger then we need to gray objects.
+    // This routine is like registerCSetRegion but for an entire
+    // collection of regions.
+    if (max_finger > _min_finger)
+      _should_gray_objects = true;
+  }
+
   // Returns "true" if at least one mark has been completed.
   bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
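The new inline compresses a per-region loop into a single comparison: the caller folds max() over the region ends it just added and registers that once. A standalone sketch of how the per-region and batch forms relate (simplified stand-in types, not the HotSpot declarations):

    #include <cstdint>

    typedef uintptr_t Addr;   // stand-in for HeapWord*

    struct MarkerSketch {
      Addr min_finger;               // lowest address marking has not passed
      bool should_gray_objects = false;

      // Per-region form, analogous to registerCSetRegion(hr):
      void register_region(Addr region_end) {
        if (region_end > min_finger) should_gray_objects = true;
      }

      // Batch form, analogous to register_collection_set_finger():
      // one comparison for a whole group of regions.
      void register_max_finger(Addr max_region_end) {
        if (max_region_end > min_finger) should_gray_objects = true;
      }
    };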

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging
-#define SCAN_ONLY_VERBOSE 0
+#define YOUNG_LIST_VERBOSE 0
 // CURRENT STATUS
 // This file is under construction. Search for "FIXME".
@@ -133,8 +133,7 @@ public:

 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
-    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
-    _length(0), _scan_only_length(0),
+    _length(0),
     _last_sampled_rs_lengths(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
@@ -166,48 +165,6 @@ void YoungList::add_survivor_region(HeapRegion* hr) {
   ++_survivor_length;
 }

-HeapRegion* YoungList::pop_region() {
-  while (_head != NULL) {
-    assert( length() > 0, "list should not be empty" );
-    HeapRegion* ret = _head;
-    _head = ret->get_next_young_region();
-    ret->set_next_young_region(NULL);
-    --_length;
-    assert(ret->is_young(), "region should be very young");
-
-    // Replace 'Survivor' region type with 'Young'. So the region will
-    // be treated as a young region and will not be 'confused' with
-    // newly created survivor regions.
-    if (ret->is_survivor()) {
-      ret->set_young();
-    }
-
-    if (!ret->is_scan_only()) {
-      return ret;
-    }
-
-    // scan-only, we'll add it to the scan-only list
-    if (_scan_only_tail == NULL) {
-      guarantee( _scan_only_head == NULL, "invariant" );
-
-      _scan_only_head = ret;
-      _curr_scan_only = ret;
-    } else {
-      guarantee( _scan_only_head != NULL, "invariant" );
-      _scan_only_tail->set_next_young_region(ret);
-    }
-    guarantee( ret->get_next_young_region() == NULL, "invariant" );
-    _scan_only_tail = ret;
-
-    // no need to be tagged as scan-only any more
-    ret->set_young();
-
-    ++_scan_only_length;
-  }
-  assert( length() == 0, "list should be empty" );
-  return NULL;
-}
-
 void YoungList::empty_list(HeapRegion* list) {
   while (list != NULL) {
     HeapRegion* next = list->get_next_young_region();
@@ -225,12 +182,6 @@ void YoungList::empty_list() {
   _head = NULL;
   _length = 0;

-  empty_list(_scan_only_head);
-  _scan_only_head = NULL;
-  _scan_only_tail = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only = NULL;
-
   empty_list(_survivor_head);
   _survivor_head = NULL;
   _survivor_tail = NULL;
@@ -248,11 +199,11 @@ bool YoungList::check_list_well_formed() {
   HeapRegion* curr = _head;
   HeapRegion* last = NULL;
   while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
+    if (!curr->is_young()) {
       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
+                             "incorrectly tagged (y: %d, surv: %d)",
                              curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
+                             curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -267,47 +218,10 @@ bool YoungList::check_list_well_formed() {
                            length, _length);
   }

-  bool scan_only_ret = true;
-  length = 0;
-  curr = _scan_only_head;
-  last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
-      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
-                             curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
-      scan_only_ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  scan_only_ret = scan_only_ret && (length == _scan_only_length);
-
-  if ( (last != _scan_only_tail) ||
-       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
-       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
-    gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
-    scan_only_ret = false;
-  }
-
-  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
-    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
-    scan_only_ret = false;
-  }
-
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
-    gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
-                           length, _scan_only_length);
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }

-bool YoungList::check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample) {
+bool YoungList::check_list_empty(bool check_sample) {
   bool ret = true;

   if (_length != 0) {
@@ -327,28 +241,7 @@ bool YoungList::check_list_empty(bool check_sample) {
     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   }

-  if (ignore_scan_only_list)
-    return ret;
-
-  bool scan_only_ret = true;
-  if (_scan_only_length != 0) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
-                  _scan_only_length);
-    scan_only_ret = false;
-  }
-
-  if (_scan_only_head != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
-    scan_only_ret = false;
-  }
-
-  if (_scan_only_tail != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
-    scan_only_ret = false;
-  }
-
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }

 void
@@ -365,7 +258,18 @@ YoungList::rs_length_sampling_more() {
 void
 YoungList::rs_length_sampling_next() {
   assert( _curr != NULL, "invariant" );
-  _sampled_rs_lengths += _curr->rem_set()->occupied();
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
   _curr = _curr->get_next_young_region();
   if (_curr == NULL) {
     _last_sampled_rs_lengths = _sampled_rs_lengths;
@@ -375,54 +279,46 @@ YoungList::rs_length_sampling_next() {

 void
 YoungList::reset_auxilary_lists() {
-  // We could have just "moved" the scan-only list to the young list.
-  // However, the scan-only list is ordered according to the region
-  // age in descending order, so, by moving one entry at a time, we
-  // ensure that it is recreated in ascending order.
-
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");

   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
+
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
     _g1h->g1_policy()->set_region_survivors(curr);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   }
   _g1h->g1_policy()->note_stop_adding_survivor_regions();

+  _head   = _survivor_head;
+  _length = _survivor_length;
   if (_survivor_head != NULL) {
-    _head           = _survivor_head;
-    _length         = _survivor_length + _scan_only_length;
-    _survivor_tail->set_next_young_region(_scan_only_head);
-  } else {
-    _head           = _scan_only_head;
-    _length         = _scan_only_length;
+    assert(_survivor_tail != NULL, "cause it shouldn't be");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
   }

-  for (HeapRegion* curr = _scan_only_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    curr->recalculate_age_in_surv_rate_group();
-  }
-  _scan_only_head   = NULL;
-  _scan_only_tail   = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only   = NULL;
-
-  _survivor_head    = NULL;
-  _survivor_tail    = NULL;
-  _survivor_length  = 0;
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need it in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

   assert(check_list_well_formed(), "young list should be well formed");
 }

 void YoungList::print() {
-  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
-  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};

   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
@@ -431,7 +327,7 @@ void YoungList::print() {
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                             "age: %4d, y: %d, s-o: %d, surv: %d",
+                             "age: %4d, y: %d, surv: %d",
                              curr->bottom(), curr->end(),
                              curr->top(),
                              curr->prev_top_at_mark_start(),
@@ -439,7 +335,6 @@ void YoungList::print() {
                              curr->top_at_conc_mark_count(),
                              curr->age_in_surv_rate_group_cond(),
                              curr->is_young(),
-                             curr->is_scan_only(),
                              curr->is_survivor());
       curr = curr->get_next_young_region();
     }
@@ -707,6 +602,12 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // region below.
     if (_cur_alloc_region != NULL) {
       // We're finished with the _cur_alloc_region.
+      // As we're builing (at least the young portion) of the collection
+      // set incrementally we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
       _cur_alloc_region = NULL;
     }
@@ -820,6 +721,12 @@ void G1CollectedHeap::abandon_cur_alloc_region() {
       _free_regions++;
       free_region(_cur_alloc_region);
     } else {
+      // As we're builing (at least the young portion) of the collection
+      // set incrementally we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
     }
     _cur_alloc_region = NULL;
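Both hunks retire _cur_alloc_region the same way, which is the heart of the incremental collection-set scheme: a young region enters the collection set when it is retired, not in one bulk pass at the start of the pause. A standalone sketch of the retirement pattern (simplified stand-in types, not the HotSpot classes):

    #include <cstddef>

    struct Region {
      bool   young;
      size_t used_bytes;
    };

    struct Policy {
      // Stand-in for add_region_to_incremental_cset_lhs(): record the
      // region and fold its stats into the pause-time prediction.
      void add_to_incremental_cset(Region*) { /* bookkeeping */ }
    };

    struct HeapSketch {
      Region* cur_alloc_region = nullptr;
      size_t  summary_bytes_used = 0;
      Policy  policy;

      // Young regions join the collection set at retirement, so the
      // pause no longer needs an O(young-list-length) "choose" step.
      void retire_cur_alloc_region() {
        if (cur_alloc_region != nullptr) {
          if (cur_alloc_region->young) {
            policy.add_to_incremental_cset(cur_alloc_region);
          }
          summary_bytes_used += cur_alloc_region->used_bytes;
          cur_alloc_region = nullptr;
        }
      }
    };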
@@ -913,20 +820,25 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
   }

   if (full && DisableExplicitGC) {
-    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
     return;
   }

   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
   {
     IsGCActiveMark x;

     // Timing
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
+    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+                PrintGC, true, gclog_or_tty);

     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -970,6 +882,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
+
+    // We may have added regions to the current incremental collection
+    // set between the last GC or pause and now. We need to clear the
+    // incremental collection set and then start rebuilding it afresh
+    // after this full GC.
+    abandon_collection_set(g1_policy()->inc_cset_head());
+    g1_policy()->clear_incremental_cset();
+    g1_policy()->stop_incremental_cset_building();
+
     if (g1_policy()->in_young_gc_mode()) {
       empty_young_list();
       g1_policy()->set_full_young_gcs(true);
@@ -985,12 +906,12 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

     ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_soft_refs);
+    ref_processor()->setup_policy(do_clear_all_soft_refs);

     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
     // Because freeing humongous regions may have added some unclean
     // regions, it is necessary to tear down again before rebuilding.
@@ -1053,6 +974,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
       perm()->compute_new_size();
     }

+    // Start a new incremental collection set for the next pause
+    assert(g1_policy()->collection_set() == NULL, "must be");
+    g1_policy()->start_incremental_cset_building();
+
+    // Clear the _cset_fast_test bitmap in anticipation of adding
+    // regions to the incremental collection set for the next
+    // evacuation pause.
+    clear_cset_fast_test();
+
     double end = os::elapsedTime();
     g1_policy()->record_full_collection_end();
@@ -1071,7 +1001,9 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     if (g1_policy()->in_young_gc_mode()) {
       _young_list->reset_sampled_info();
-      assert( check_young_list_empty(false, false),
+      // At this point there should be no regions in the
+      // entire heap tagged as young.
+      assert( check_young_list_empty(true /* check_heap */),
               "young list should be empty at this point");
     }
@@ -1208,6 +1140,9 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
     return result;
   }

+  assert(!collector_policy()->should_clear_all_soft_refs(),
+    "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
@@ -1565,6 +1500,20 @@ jint G1CollectedHeap::initialize() {

   _g1h = this;

+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same with what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
+              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
+
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
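The biasing comment above is worth unpacking. A standalone sketch of the trick (illustrative constants, not the HotSpot declarations; note that forming the out-of-range intermediate pointer is technically undefined behavior in standard C++ and is shown only to mirror the HotSpot idiom, which is the same one used for the card table):

    #include <cstdint>
    #include <cstddef>

    const int       kLogRegionBytes = 20;            // assume 1 MB regions
    const uintptr_t kHeapStart      = 0x80000000ul;  // assumed heap base
    const size_t    kNumRegions     = 256;

    static bool  table_base[kNumRegions];            // one flag per region
    // Bias the base pointer once, so each lookup needs no subtraction:
    static bool* biased_table =
        table_base - (kHeapStart >> kLogRegionBytes);

    inline bool in_cset_fast(uintptr_t addr) {
      // One shift plus one indexed load on the hot path.
      return biased_table[addr >> kLogRegionBytes];
    }

Allocating the table once at initialization (rather than per pause, as the old code in do_collection_pause_at_safepoint did) is what lets the incremental collection set be maintained between pauses.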
@@ -2185,8 +2134,10 @@ public:
     assert(o != NULL, "Huh?");
     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
       o->oop_iterate(&isLive);
-      if (!_hr->obj_allocated_since_prev_marking(o))
-        _live_bytes += (o->size() * HeapWordSize);
+      if (!_hr->obj_allocated_since_prev_marking(o)) {
+        size_t obj_size = o->size();    // Make sure we don't overflow
+        _live_bytes += (obj_size * HeapWordSize);
+      }
     }
   }
   size_t live_bytes() { return _live_bytes; }
@@ -2388,8 +2339,8 @@ void G1CollectedHeap::verify(bool allow_dirty,
       print_on(gclog_or_tty, true /* extended */);
       gclog_or_tty->print_cr("");
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
-        concurrent_mark()->print_reachable(use_prev_marking,
-                                           "failed-verification");
+        concurrent_mark()->print_reachable("at-verification-failure",
+                                           use_prev_marking, false /* all */);
       }
       gclog_or_tty->flush();
     }
@@ -2741,25 +2692,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     double start_time_sec = os::elapsedTime();
     size_t start_used_bytes = used();

+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+    _young_list->print();
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);

-    guarantee(_in_cset_fast_test == NULL, "invariant");
-    guarantee(_in_cset_fast_test_base == NULL, "invariant");
-    _in_cset_fast_test_length = max_regions();
-    _in_cset_fast_test_base =
-                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
-    memset(_in_cset_fast_test_base, false,
-                                     _in_cset_fast_test_length * sizeof(bool));
-    // We're biasing _in_cset_fast_test to avoid subtracting the
-    // beginning of the heap every time we want to index; basically
-    // it's the same with what we do with the card table.
-    _in_cset_fast_test = _in_cset_fast_test_base -
-              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
-
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
     _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE

     if (g1_policy()->during_initial_mark_pause()) {
       concurrent_mark()->checkpointRootsInitialPre();
@@ -2786,12 +2731,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     if (mark_in_progress())
       concurrent_mark()->newCSet();

-    // Now choose the CS.
-    g1_policy()->choose_collection_set();
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+    _young_list->print();
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE

-    // We may abandon a pause if we find no region that will fit in the MMU
-    // pause.
-    bool abandoned = (g1_policy()->collection_set() == NULL);
+    // Now choose the CS. We may abandon a pause if we find no
+    // region that will fit in the MMU pause.
+    bool abandoned = g1_policy()->choose_collection_set();

     // Nothing to do if we were unable to choose a collection set.
     if (!abandoned) {
@@ -2809,40 +2757,64 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       // Actually do the work...
       evacuate_collection_set();
+
       free_collection_set(g1_policy()->collection_set());
       g1_policy()->clear_collection_set();

-      FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-      // this is more for peace of mind; we're nulling them here and
-      // we're expecting them to be null at the beginning of the next GC
-      _in_cset_fast_test = NULL;
-      _in_cset_fast_test_base = NULL;
-
       cleanup_surviving_young_words();

+      // Start a new incremental collection set for the next pause.
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();
+
       if (g1_policy()->in_young_gc_mode()) {
         _young_list->reset_sampled_info();
-        assert(check_young_list_empty(true),
-               "young list should be empty");

-#if SCAN_ONLY_VERBOSE
+        // Don't check the whole heap at this point as the
+        // GC alloc regions from this pause have been tagged
+        // as survivors and moved on to the survivor list.
+        // Survivor regions will fail the !is_young() check.
+        assert(check_young_list_empty(false /* check_heap */),
+               "young list should be empty");
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
         _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE

         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                           _young_list->first_survivor_region(),
                                           _young_list->last_survivor_region());
+
         _young_list->reset_auxilary_lists();
       }
     } else {
-      if (_in_cset_fast_test != NULL) {
-        assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
-        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-        // this is more for peace of mind; we're nulling them here and
-        // we're expecting them to be null at the beginning of the next GC
-        _in_cset_fast_test = NULL;
-        _in_cset_fast_test_base = NULL;
-      }
+      // We have abandoned the current collection. This can only happen
+      // if we're not doing young or partially young collections, and
+      // we didn't find an old region that we're able to collect within
+      // the allowed time.
+
+      assert(g1_policy()->collection_set() == NULL, "should be");
+      assert(_young_list->length() == 0, "because it should be");
+
+      // This should be a no-op.
+      abandon_collection_set(g1_policy()->inc_cset_head());
+
+      g1_policy()->clear_incremental_cset();
+      g1_policy()->stop_incremental_cset_building();
+
+      // Start a new incremental collection set for the next pause.
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();

       // This looks confusing, because the DPT should really be empty
       // at this point -- since we have not done any collection work,
       // there should not be any derived pointers in the table to update;
@@ -2876,9 +2848,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       doConcurrentMark();
     }

-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
     _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE

     double end_time_sec = os::elapsedTime();
     double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
@@ -2936,6 +2910,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
   }
 }

+size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
+{
+  size_t gclab_word_size;
+  switch (purpose) {
+    case GCAllocForSurvived:
+      gclab_word_size = YoungPLABSize;
+      break;
+    case GCAllocForTenured:
+      gclab_word_size = OldPLABSize;
+      break;
+    default:
+      assert(false, "unknown GCAllocPurpose");
+      gclab_word_size = OldPLABSize;
+      break;
+  }
+  return gclab_word_size;
+}
+
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   // make sure we don't call set_gc_alloc_region() multiple times on
@@ -3109,6 +3102,11 @@ void G1CollectedHeap::get_gc_alloc_regions() {
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
+      if (G1PrintHeapRegions) {
+        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
+                               "top "PTR_FORMAT,
+                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
+      }
     }

     if (alloc_region != NULL) {
@@ -3665,6 +3663,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false),
 #if G1_DETAILED_STATS
     _pushes(0), _pops(0), _steals(0),
@@ -3691,6 +3691,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)

   _overflowed_refs = new OverflowQueue(10);

+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
   _start = os::elapsedTime();
 }
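The two G1ParScanThreadState hunks give each worker two fixed PLAB members, sized via desired_plab_sz, and an array of pointers indexed by allocation purpose so that copy-time dispatch is a single indexed load. A standalone sketch of the pattern (simplified stand-in types, not the HotSpot classes):

    #include <cstddef>

    enum Purpose { ForSurvived = 0, ForTenured = 1, PurposeCount = 2 };

    struct Plab {
      size_t capacity_words;   // sized once, via something like desired_plab_sz()
      size_t used_words = 0;
      explicit Plab(size_t cap) : capacity_words(cap) {}
      bool allocate(size_t words) {       // bump-style allocation, sketch only
        if (used_words + words > capacity_words) return false;
        used_words += words;
        return true;
      }
    };

    struct ScanThreadState {
      Plab  survivor_plab;
      Plab  tenured_plab;
      Plab* plabs[PurposeCount];
      ScanThreadState(size_t surv_words, size_t old_words)
          : survivor_plab(surv_words), tenured_plab(old_words) {
        plabs[ForSurvived] = &survivor_plab;  // dispatch by copy destination
        plabs[ForTenured]  = &tenured_plab;
      }
    };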
@@ -3988,16 +3991,13 @@ public:

     OopsInHeapRegionClosure        *scan_root_cl;
     OopsInHeapRegionClosure        *scan_perm_cl;
-    OopsInHeapRegionClosure        *scan_so_cl;

     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-      scan_so_cl   = &scan_mark_heap_rs_cl;
     } else {
       scan_root_cl = &only_scan_root_cl;
       scan_perm_cl = &only_scan_perm_cl;
-      scan_so_cl   = &only_scan_heap_rs_cl;
     }

     pss.start_strong_roots();
@@ -4005,7 +4005,6 @@ public:
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
                                   &push_heap_rs_cl,
-                                  scan_so_cl,
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
@@ -4067,7 +4066,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
-                        OopsInHeapRegionClosure* scan_so,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
   // First scan the strong roots, including the perm gen.
@@ -4087,6 +4085,7 @@ g1_process_strong_roots(bool collecting_perm_gen,
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
                        &buf_scan_perm);
+
   // Finish up any enqueued closure apps.
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
@@ -4109,9 +4108,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
   // XXX What should this be doing in the parallel case?
   g1_policy()->record_collection_pause_end_CH_strong_roots();
-  if (scan_so != NULL) {
-    scan_scan_only_set(scan_so, worker_i);
-  }
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
@@ -4124,54 +4120,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
   _process_strong_tasks->all_tasks_completed();
 }

-void
-G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
-                                       OopsInHeapRegionClosure* oc,
-                                       int worker_i) {
-  HeapWord* startAddr = r->bottom();
-  HeapWord* endAddr = r->used_region().end();
-
-  oc->set_region(r);
-
-  HeapWord* p = r->bottom();
-  HeapWord* t = r->top();
-  guarantee( p == r->next_top_at_mark_start(), "invariant" );
-  while (p < t) {
-    oop obj = oop(p);
-    p += obj->oop_iterate(oc);
-  }
-}
-
-void
-G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                                    int worker_i) {
-  double start = os::elapsedTime();
-
-  BufferingOopsInHeapRegionClosure boc(oc);
-
-  FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
-  FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
-
-  OopsInHeapRegionClosure *foc;
-  if (g1_policy()->during_initial_mark_pause())
-    foc = &scan_and_mark;
-  else
-    foc = &scan_only;
-
-  HeapRegion* hr;
-  int n = 0;
-  while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
-    scan_scan_only_region(hr, foc, worker_i);
-    ++n;
-  }
-  boc.done();
-
-  double closure_app_s = boc.closure_app_seconds();
-  g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
-  double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
-  g1_policy()->record_scan_only_time(worker_i, ms, n);
-}
-
 void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                        OopClosure* non_root_closure) {
@@ -4370,17 +4318,14 @@ void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRe
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
-  HeapRegion* volatile _so_head;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h,
-                     HeapRegion* scan_only_list,
                      HeapRegion* survivor_list) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs),
     _g1h(g1h),
-    _so_head(scan_only_list),
     _su_head(survivor_list)
   { }
@@ -4389,14 +4334,13 @@ public:
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the scan-only and survivor regions.
-    dirty_list(&this->_so_head);
+    // Redirty the cards of the survivor regions.
     dirty_list(&this->_su_head);
   }

   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor and Scan-Only regions will be dirtied later.
-    if (!r->is_scan_only() && !r->is_survivor()) {
+    // Cards for Survivor regions will be dirtied later.
+    if (!r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
@@ -4429,7 +4373,7 @@ public:
   virtual bool doHeapRegion(HeapRegion* r)
   {
     MemRegion mr(r->bottom(), r->end());
-    if (r->is_scan_only() || r->is_survivor()) {
+    if (r->is_survivor()) {
       _ct_bs->verify_dirty_region(mr);
     } else {
       _ct_bs->verify_clean_region(mr);
@@ -4445,8 +4389,8 @@ void G1CollectedHeap::cleanUpCardTable() {

   // Iterate over the dirty cards region list.
   G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_scan_only_region(),
                                   _young_list->first_survivor_region());
+
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
     workers()->run_task(&cleanup_task);
@@ -4462,12 +4406,12 @@ void G1CollectedHeap::cleanUpCardTable() {
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the scan-only and survivor regions
+    // now, redirty the cards of the survivor regions
     // (it seemed faster to do it this way, instead of iterating over
     // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
+
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
@ -4488,6 +4432,11 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
double young_time_ms = 0.0; double young_time_ms = 0.0;
double non_young_time_ms = 0.0; double non_young_time_ms = 0.0;
// Since the collection set is a superset of the the young list,
// all we need to do to clear the young list is clear its
// head and length, and unlink any young regions in the code below
_young_list->clear();
G1CollectorPolicy* policy = g1_policy(); G1CollectorPolicy* policy = g1_policy();
double start_sec = os::elapsedTime(); double start_sec = os::elapsedTime();
@ -4531,6 +4480,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
size_t words_survived = _surviving_young_words[index]; size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived); cur->record_surv_words_in_group(words_survived);
// At this point the we have 'popped' cur from the collection set
// (linked via next_in_collection_set()) but it is still in the
// young list (linked via next_young_region()). Clear the
// _next_young_region field.
cur->set_next_young_region(NULL);
} else { } else {
int index = cur->young_index_in_cset(); int index = cur->young_index_in_cset();
guarantee( index == -1, "invariant" ); guarantee( index == -1, "invariant" );
@ -4546,7 +4501,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
"Should not have empty regions in a CS."); "Should not have empty regions in a CS.");
free_region(cur); free_region(cur);
} else { } else {
guarantee( !cur->is_scan_only(), "should not be scan only" );
cur->uninstall_surv_rate_group(); cur->uninstall_surv_rate_group();
if (cur->is_young()) if (cur->is_young())
cur->set_young_index_in_cset(-1); cur->set_young_index_in_cset(-1);
@ -4570,6 +4524,27 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
policy->record_non_young_free_cset_time_ms(non_young_time_ms); policy->record_non_young_free_cset_time_ms(non_young_time_ms);
} }
// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation for a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.
void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
HeapRegion* cur = cs_head;
while (cur != NULL) {
HeapRegion* next = cur->next_in_collection_set();
assert(cur->in_collection_set(), "bad CS");
cur->set_next_in_collection_set(NULL);
cur->set_in_collection_set(false);
cur->set_young_index_in_cset(-1);
cur = next;
}
}
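// A hedged sketch (not part of this commit) of how a full collection
// might pair abandon_collection_set() with the young list tear-down
// mentioned in the comment above. The wrapper name is hypothetical;
// the callees are the ones this patch declares.
void G1CollectedHeap::example_full_gc_prologue() {
  abandon_collection_set(g1_policy()->inc_cset_head());
  g1_policy()->clear_incremental_cset();
  g1_policy()->stop_incremental_cset_building();
  _young_list->empty_list();  // tear down the young list
}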
HeapRegion* HeapRegion*
G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
assert(ZF_mon->owned_by_self(), "Precondition"); assert(ZF_mon->owned_by_self(), "Precondition");
@ -4936,12 +4911,10 @@ public:
bool success() { return _success; } bool success() { return _success; }
}; };
bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
bool check_sample) { bool ret = _young_list->check_list_empty(check_sample);
bool ret = true;
ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); if (check_heap) {
if (!ignore_scan_only_list) {
NoYoungRegionsClosure closure; NoYoungRegionsClosure closure;
heap_region_iterate(&closure); heap_region_iterate(&closure);
ret = ret && closure.success(); ret = ret && closure.success();
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -81,33 +81,29 @@ private:
HeapRegion* _head; HeapRegion* _head;
HeapRegion* _scan_only_head; HeapRegion* _survivor_head;
HeapRegion* _scan_only_tail; HeapRegion* _survivor_tail;
HeapRegion* _curr;
size_t _length; size_t _length;
size_t _scan_only_length; size_t _survivor_length;
size_t _last_sampled_rs_lengths; size_t _last_sampled_rs_lengths;
size_t _sampled_rs_lengths; size_t _sampled_rs_lengths;
HeapRegion* _curr;
HeapRegion* _curr_scan_only;
HeapRegion* _survivor_head; void empty_list(HeapRegion* list);
HeapRegion* _survivor_tail;
size_t _survivor_length;
void empty_list(HeapRegion* list);
public: public:
YoungList(G1CollectedHeap* g1h); YoungList(G1CollectedHeap* g1h);
void push_region(HeapRegion* hr); void push_region(HeapRegion* hr);
void add_survivor_region(HeapRegion* hr); void add_survivor_region(HeapRegion* hr);
HeapRegion* pop_region();
void empty_list(); void empty_list();
bool is_empty() { return _length == 0; } bool is_empty() { return _length == 0; }
size_t length() { return _length; } size_t length() { return _length; }
size_t scan_only_length() { return _scan_only_length; } size_t survivor_length() { return _survivor_length; }
size_t survivor_length() { return _survivor_length; }
void rs_length_sampling_init(); void rs_length_sampling_init();
bool rs_length_sampling_more(); bool rs_length_sampling_more();
@ -120,22 +116,21 @@ public:
// for development purposes // for development purposes
void reset_auxilary_lists(); void reset_auxilary_lists();
void clear() { _head = NULL; _length = 0; }
void clear_survivors() {
_survivor_head = NULL;
_survivor_tail = NULL;
_survivor_length = 0;
}
HeapRegion* first_region() { return _head; } HeapRegion* first_region() { return _head; }
HeapRegion* first_scan_only_region() { return _scan_only_head; }
HeapRegion* first_survivor_region() { return _survivor_head; } HeapRegion* first_survivor_region() { return _survivor_head; }
HeapRegion* last_survivor_region() { return _survivor_tail; } HeapRegion* last_survivor_region() { return _survivor_tail; }
HeapRegion* par_get_next_scan_only_region() {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
HeapRegion* ret = _curr_scan_only;
if (ret != NULL)
_curr_scan_only = ret->get_next_young_region();
return ret;
}
// debugging // debugging
bool check_list_well_formed(); bool check_list_well_formed();
bool check_list_empty(bool ignore_scan_only_list, bool check_list_empty(bool check_sample = true);
bool check_sample = true);
void print(); void print();
}; };
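// A hedged usage sketch for the slimmed-down YoungList: walking the
// survivor list with the accessors this patch keeps. The helper is
// illustrative only; get_next_young_region() is assumed from its use
// in the removed par_get_next_scan_only_region() above.
static void example_walk_survivors(YoungList* young_list) {
  for (HeapRegion* hr = young_list->first_survivor_region();
       hr != NULL;
       hr = hr->get_next_young_region()) {
    // e.g. redirty the region's cards, as cleanUpCardTable() now does
    // for survivor regions only
  }
}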
@ -232,6 +227,9 @@ private:
// current collection. // current collection.
HeapRegion* _gc_alloc_region_list; HeapRegion* _gc_alloc_region_list;
// Determines PLAB size for a particular allocation purpose.
static size_t desired_plab_sz(GCAllocPurpose purpose);
// When called by par thread, require par_alloc_during_gc_lock() to be held. // When called by par thread, require par_alloc_during_gc_lock() to be held.
void push_gc_alloc_region(HeapRegion* hr); void push_gc_alloc_region(HeapRegion* hr);
@ -402,8 +400,7 @@ public:
assert(_in_cset_fast_test_base != NULL, "sanity"); assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant"); assert(r->in_collection_set(), "invariant");
int index = r->hrs_index(); int index = r->hrs_index();
assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
"invariant");
assert(!_in_cset_fast_test_base[index], "invariant"); assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true; _in_cset_fast_test_base[index] = true;
} }
@ -428,6 +425,12 @@ public:
} }
} }
void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
}
protected: protected:
// Shrink the garbage-first heap by at most the given size (in bytes!). // Shrink the garbage-first heap by at most the given size (in bytes!).
@ -473,6 +476,10 @@ protected:
// regions. // regions.
void free_collection_set(HeapRegion* cs_head); void free_collection_set(HeapRegion* cs_head);
// Abandon the current collection set without recording policy
// statistics or updating free lists.
void abandon_collection_set(HeapRegion* cs_head);
// Applies "scan_non_heap_roots" to roots outside the heap, // Applies "scan_non_heap_roots" to roots outside the heap,
// "scan_rs" to roots inside the heap (having done "set_region" to // "scan_rs" to roots inside the heap (having done "set_region" to
// indicate the region in which the root resides), and does "scan_perm" // indicate the region in which the root resides), and does "scan_perm"
@ -485,16 +492,9 @@ protected:
SharedHeap::ScanningOption so, SharedHeap::ScanningOption so,
OopClosure* scan_non_heap_roots, OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs, OopsInHeapRegionClosure* scan_rs,
OopsInHeapRegionClosure* scan_so,
OopsInGenClosure* scan_perm, OopsInGenClosure* scan_perm,
int worker_i); int worker_i);
void scan_scan_only_set(OopsInHeapRegionClosure* oc,
int worker_i);
void scan_scan_only_region(HeapRegion* hr,
OopsInHeapRegionClosure* oc,
int worker_i);
// Apply "blk" to all the weak roots of the system. These include // Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table, // JNI weak roots, the code cache, system dictionary, symbol table,
// string table, and referents of reachable weak refs. // string table, and referents of reachable weak refs.
@ -1133,36 +1133,14 @@ public:
void set_region_short_lived_locked(HeapRegion* hr); void set_region_short_lived_locked(HeapRegion* hr);
// add appropriate methods for any other surv rate groups // add appropriate methods for any other surv rate groups
void young_list_rs_length_sampling_init() { YoungList* young_list() { return _young_list; }
_young_list->rs_length_sampling_init();
}
bool young_list_rs_length_sampling_more() {
return _young_list->rs_length_sampling_more();
}
void young_list_rs_length_sampling_next() {
_young_list->rs_length_sampling_next();
}
size_t young_list_sampled_rs_lengths() {
return _young_list->sampled_rs_lengths();
}
size_t young_list_length() { return _young_list->length(); }
size_t young_list_scan_only_length() {
return _young_list->scan_only_length(); }
HeapRegion* pop_region_from_young_list() {
return _young_list->pop_region();
}
HeapRegion* young_list_first_region() {
return _young_list->first_region();
}
// debugging // debugging
bool check_young_list_well_formed() { bool check_young_list_well_formed() {
return _young_list->check_list_well_formed(); return _young_list->check_list_well_formed();
} }
bool check_young_list_empty(bool ignore_scan_only_list,
bool check_young_list_empty(bool check_heap,
bool check_sample = true); bool check_sample = true);
// *** Stuff related to concurrent marking. It's not clear to me that so // *** Stuff related to concurrent marking. It's not clear to me that so
@ -1367,12 +1345,18 @@ private:
return BitsPerWord << shifter(); return BitsPerWord << shifter();
} }
static size_t gclab_word_size() { size_t gclab_word_size() const {
return G1ParallelGCAllocBufferSize / HeapWordSize; return _gclab_word_size;
} }
static size_t bitmap_size_in_bits() { // Calculates actual GCLab size in words
size_t bits_in_bitmap = gclab_word_size() >> shifter(); size_t gclab_real_word_size() const {
return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
/ BitsPerWord;
}
static size_t bitmap_size_in_bits(size_t gclab_word_size) {
size_t bits_in_bitmap = gclab_word_size >> shifter();
// We are going to ensure that the beginning of a word in this // We are going to ensure that the beginning of a word in this
// bitmap also corresponds to the beginning of a word in the // bitmap also corresponds to the beginning of a word in the
// global marking bitmap. To handle the case where a GCLab // global marking bitmap. To handle the case where a GCLab
@ -1382,13 +1366,13 @@ private:
return bits_in_bitmap + BitsPerWord - 1; return bits_in_bitmap + BitsPerWord - 1;
} }
public: public:
GCLabBitMap(HeapWord* heap_start) GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
: BitMap(bitmap_size_in_bits()), : BitMap(bitmap_size_in_bits(gclab_word_size)),
_cm(G1CollectedHeap::heap()->concurrent_mark()), _cm(G1CollectedHeap::heap()->concurrent_mark()),
_shifter(shifter()), _shifter(shifter()),
_bitmap_word_covers_words(bitmap_word_covers_words()), _bitmap_word_covers_words(bitmap_word_covers_words()),
_heap_start(heap_start), _heap_start(heap_start),
_gclab_word_size(gclab_word_size()), _gclab_word_size(gclab_word_size),
_real_start_word(NULL), _real_start_word(NULL),
_real_end_word(NULL), _real_end_word(NULL),
_start_word(NULL) _start_word(NULL)
@ -1483,7 +1467,7 @@ public:
mark_bitmap->mostly_disjoint_range_union(this, mark_bitmap->mostly_disjoint_range_union(this,
0, // always start from the start of the bitmap 0, // always start from the start of the bitmap
_start_word, _start_word,
size_in_words()); gclab_real_word_size());
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
#ifndef PRODUCT #ifndef PRODUCT
@ -1495,9 +1479,10 @@ public:
} }
} }
static size_t bitmap_size_in_words() { size_t bitmap_size_in_words() const {
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
} }
}; };
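// A hedged arithmetic sketch of the sizing above, now that the GCLab
// word size is a constructor argument instead of a flag-derived
// constant. With an assumed gclab_word_size of 4096, shifter() of 0
// and BitsPerWord of 64 (illustrative values, not from this commit):
//   bitmap_size_in_bits(4096) = (4096 >> 0) + 64 - 1 = 4159
//   bitmap_size_in_words()    = (4159 + 63) / 64     = 65
// i.e. one tracking bit per heap word plus at most a word of
// alignment slack for the mostly_disjoint_range_union() transfer.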
class G1ParGCAllocBuffer: public ParGCAllocBuffer { class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@ -1507,10 +1492,10 @@ private:
GCLabBitMap _bitmap; GCLabBitMap _bitmap;
public: public:
G1ParGCAllocBuffer() : G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), ParGCAllocBuffer(gclab_word_size),
_during_marking(G1CollectedHeap::heap()->mark_in_progress()), _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
_bitmap(G1CollectedHeap::heap()->reserved_region().start()), _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
_retired(false) _retired(false)
{ } { }
@ -1549,8 +1534,10 @@ protected:
typedef GrowableArray<StarTask> OverflowQueue; typedef GrowableArray<StarTask> OverflowQueue;
OverflowQueue* _overflowed_refs; OverflowQueue* _overflowed_refs;
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; G1ParGCAllocBuffer _surviving_alloc_buffer;
ageTable _age_table; G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
size_t _alloc_buffer_waste; size_t _alloc_buffer_waste;
size_t _undo_waste; size_t _undo_waste;
@ -1619,7 +1606,7 @@ public:
ageTable* age_table() { return &_age_table; } ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return &_alloc_buffers[purpose]; return _alloc_buffers[purpose];
} }
size_t alloc_buffer_waste() { return _alloc_buffer_waste; } size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
@ -1684,15 +1671,15 @@ public:
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL; HeapWord* obj = NULL;
if (word_sz * 100 < size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) * if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
assert(gclab_word_size == alloc_buf->word_sz(),
"dynamic resizing is not supported");
add_to_alloc_buffer_waste(alloc_buf->words_remaining()); add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false, false); alloc_buf->retire(false, false);
HeapWord* buf = HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
if (buf == NULL) return NULL; // Let caller handle allocation failure. if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise. // Otherwise.
alloc_buf->set_buf(buf); alloc_buf->set_buf(buf);
@ -1786,9 +1773,9 @@ public:
void retire_alloc_buffers() { void retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap].words_remaining(); size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste); add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap].retire(true, false); _alloc_buffers[ap]->retire(true, false);
} }
} }
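// A hedged illustration of the waste test in allocate_slow() above:
// an allocation may retire the current buffer and request a fresh
// GCLab only if it is smaller than ParallelGCBufferWastePct percent
// of the (now purpose-dependent) buffer size. Assuming a
// gclab_word_size of 4096 words and ParallelGCBufferWastePct of 10
// (illustrative values): 409 * 100 < 4096 * 10 holds, so a 409-word
// object may refill the buffer, while a 410-word object falls through
// to direct allocation rather than wasting the remaining space.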
File diff suppressed because it is too large
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,6 @@ class MainBodySummary: public CHeapObj {
define_num_seq(parallel) // parallel only define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan) define_num_seq(ext_root_scan)
define_num_seq(mark_stack_scan) define_num_seq(mark_stack_scan)
define_num_seq(scan_only)
define_num_seq(update_rs) define_num_seq(update_rs)
define_num_seq(scan_rs) define_num_seq(scan_rs)
define_num_seq(scan_new_refs) // Only for temp use; added to define_num_seq(scan_new_refs) // Only for temp use; added to
@ -174,8 +173,6 @@ protected:
double* _par_last_ext_root_scan_times_ms; double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms; double* _par_last_mark_stack_scan_times_ms;
double* _par_last_scan_only_times_ms;
double* _par_last_scan_only_regions_scanned;
double* _par_last_update_rs_start_times_ms; double* _par_last_update_rs_start_times_ms;
double* _par_last_update_rs_times_ms; double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers; double* _par_last_update_rs_processed_buffers;
@ -196,7 +193,6 @@ protected:
bool _adaptive_young_list_length; bool _adaptive_young_list_length;
size_t _young_list_min_length; size_t _young_list_min_length;
size_t _young_list_target_length; size_t _young_list_target_length;
size_t _young_list_so_prefix_length;
size_t _young_list_fixed_length; size_t _young_list_fixed_length;
size_t _young_cset_length; size_t _young_cset_length;
@ -234,7 +230,6 @@ private:
TruncatedSeq* _pending_card_diff_seq; TruncatedSeq* _pending_card_diff_seq;
TruncatedSeq* _rs_length_diff_seq; TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq; TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_seq;
TruncatedSeq* _fully_young_cards_per_entry_ratio_seq; TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
TruncatedSeq* _partially_young_cards_per_entry_ratio_seq; TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq; TruncatedSeq* _cost_per_entry_ms_seq;
@ -249,19 +244,16 @@ private:
TruncatedSeq* _rs_lengths_seq; TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq; TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;
TruncatedSeq* _young_gc_eff_seq; TruncatedSeq* _young_gc_eff_seq;
TruncatedSeq* _max_conc_overhead_seq; TruncatedSeq* _max_conc_overhead_seq;
size_t _recorded_young_regions; size_t _recorded_young_regions;
size_t _recorded_scan_only_regions;
size_t _recorded_non_young_regions; size_t _recorded_non_young_regions;
size_t _recorded_region_num; size_t _recorded_region_num;
size_t _free_regions_at_end_of_collection; size_t _free_regions_at_end_of_collection;
size_t _scan_only_regions_at_end_of_collection;
size_t _recorded_rs_lengths; size_t _recorded_rs_lengths;
size_t _max_rs_lengths; size_t _max_rs_lengths;
@ -277,7 +269,6 @@ private:
double _predicted_survival_ratio; double _predicted_survival_ratio;
double _predicted_rs_update_time_ms; double _predicted_rs_update_time_ms;
double _predicted_rs_scan_time_ms; double _predicted_rs_scan_time_ms;
double _predicted_scan_only_scan_time_ms;
double _predicted_object_copy_time_ms; double _predicted_object_copy_time_ms;
double _predicted_constant_other_time_ms; double _predicted_constant_other_time_ms;
double _predicted_young_other_time_ms; double _predicted_young_other_time_ms;
@ -344,8 +335,6 @@ public:
bool verify_young_ages(); bool verify_young_ages();
#endif // PRODUCT #endif // PRODUCT
void tag_scan_only(size_t short_lived_scan_only_length);
double get_new_prediction(TruncatedSeq* seq) { double get_new_prediction(TruncatedSeq* seq) {
return MAX2(seq->davg() + sigma() * seq->dsd(), return MAX2(seq->davg() + sigma() * seq->dsd(),
seq->davg() * confidence_factor(seq->num())); seq->davg() * confidence_factor(seq->num()));
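// A hedged numeric example for get_new_prediction(): assuming a
// sequence with davg() = 10.0, dsd() = 2.0, sigma() = 0.5 and
// confidence_factor(num()) = 1.3 (illustrative values only), the
// prediction is MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.3) = MAX2(11.0, 13.0)
// = 13.0, i.e. short sequences are padded toward the more
// conservative of the two estimates.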
@ -431,23 +420,6 @@ public:
get_new_prediction(_partially_young_cost_per_entry_ms_seq); get_new_prediction(_partially_young_cost_per_entry_ms_seq);
} }
double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
return 1.5 * (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
}
double predict_scan_only_time_ms(size_t scan_only_region_num) {
if (_in_marking_window_im)
return predict_scan_only_time_ms_during_cm(scan_only_region_num);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
}
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) { double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
if (_cost_per_byte_ms_during_cm_seq->num() < 3) if (_cost_per_byte_ms_during_cm_seq->num() < 3)
return 1.1 * (double) bytes_to_copy * return 1.1 * (double) bytes_to_copy *
@ -490,24 +462,21 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr); size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
// for use by: calculate_optimal_so_length(length) // for use by: calculate_young_list_target_length(rs_length)
void predict_gc_eff(size_t young_region_num, bool predict_will_fit(size_t young_region_num,
size_t so_length, double base_time_ms,
double base_time_ms, size_t init_free_regions,
double *gc_eff, double target_pause_time_ms);
double *pause_time_ms);
// for use by: calculate_young_list_target_config(rs_length)
bool predict_gc_eff(size_t young_region_num,
size_t so_length,
double base_time_with_so_ms,
size_t init_free_regions,
double target_pause_time_ms,
double* gc_eff);
void start_recording_regions(); void start_recording_regions();
void record_cset_region(HeapRegion* hr, bool young); void record_cset_region_info(HeapRegion* hr, bool young);
void record_scan_only_regions(size_t scan_only_length); void record_non_young_cset_region(HeapRegion* hr);
void set_recorded_young_regions(size_t n_regions);
void set_recorded_young_bytes(size_t bytes);
void set_recorded_rs_lengths(size_t rs_lengths);
void set_predicted_bytes_to_copy(size_t bytes);
void end_recording_regions(); void end_recording_regions();
void record_vtime_diff_ms(double vtime_diff_ms) { void record_vtime_diff_ms(double vtime_diff_ms) {
@ -638,11 +607,74 @@ protected:
void update_recent_gc_times(double end_time_sec, double elapsed_ms); void update_recent_gc_times(double end_time_sec, double elapsed_ms);
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _collection_set; HeapRegion* _collection_set;
// The number of regions in the collection set. Set from the incrementally
// built collection set at the start of an evacuation pause.
size_t _collection_set_size; size_t _collection_set_size;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
size_t _collection_set_bytes_used_before; size_t _collection_set_bytes_used_before;
// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.
enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_cset_build_state;
// The head of the incrementally built collection set.
HeapRegion* _inc_cset_head;
// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;
// The number of regions in the incrementally built collection set.
// Used to set _collection_set_size at the start of an evacuation
// pause.
size_t _inc_cset_size;
// Used as the index in the surviving young words structure
// which tracks the amount of space, for each young region,
// that survives the pause.
size_t _inc_cset_young_index;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
size_t _inc_cset_bytes_used_before;
// Used to record the highest end of any heap region in the collection set
HeapWord* _inc_cset_max_finger;
// The number of recorded used bytes in the young regions
// of the collection set. This is the sum of the used() bytes
// of retired young regions in the collection set.
size_t _inc_cset_recorded_young_bytes;
// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_recorded_rs_lengths;
// The predicted elapsed time it will take to collect the regions
// in the collection set (updated by the periodic sampling of the
// regions in the young list/collection set).
double _inc_cset_predicted_elapsed_time_ms;
// The predicted bytes to copy for the regions in the collection
// set (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_predicted_bytes_to_copy;
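// A hedged sketch of how add_to_incremental_cset_info() plausibly
// folds one region into the aggregates above; the actual body is in
// the .cpp file, which this hunk does not show:
//   _inc_cset_size++;
//   _inc_cset_bytes_used_before         += hr->used();
//   _inc_cset_recorded_rs_lengths       += rs_length;
//   _inc_cset_predicted_elapsed_time_ms +=
//       predict_region_elapsed_time_ms(hr, true /* young */);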
// Info about marking. // Info about marking.
int _n_marks; // Sticky at 2, so we know when we've done at least 2. int _n_marks; // Sticky at 2, so we know when we've done at least 2.
@ -761,9 +793,8 @@ protected:
double _mark_closure_time_ms; double _mark_closure_time_ms;
void calculate_young_list_min_length(); void calculate_young_list_min_length();
void calculate_young_list_target_config(); void calculate_young_list_target_length();
void calculate_young_list_target_config(size_t rs_lengths); void calculate_young_list_target_length(size_t rs_lengths);
size_t calculate_optimal_so_length(size_t young_list_length);
public: public:
@ -868,11 +899,6 @@ public:
_par_last_mark_stack_scan_times_ms[worker_i] = ms; _par_last_mark_stack_scan_times_ms[worker_i] = ms;
} }
void record_scan_only_time(int worker_i, double ms, int n) {
_par_last_scan_only_times_ms[worker_i] = ms;
_par_last_scan_only_regions_scanned[worker_i] = (double) n;
}
void record_satb_drain_time(double ms) { void record_satb_drain_time(double ms) {
_cur_satb_drain_time_ms = ms; _cur_satb_drain_time_ms = ms;
_satb_drain_time_set = true; _satb_drain_time_set = true;
@ -987,20 +1013,67 @@ public:
// Choose a new collection set. Marks the chosen regions as being // Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of // "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods. // the collection set are available via access methods.
virtual void choose_collection_set() = 0; virtual bool choose_collection_set() = 0;
void clear_collection_set() { _collection_set = NULL; }
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set.
HeapRegion* collection_set() { return _collection_set; } HeapRegion* collection_set() { return _collection_set; }
void clear_collection_set() { _collection_set = NULL; }
// The number of elements in the current collection set. // The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; } size_t collection_set_size() { return _collection_set_size; }
// Add "hr" to the CS. // Add "hr" to the CS.
void add_to_collection_set(HeapRegion* hr); void add_to_collection_set(HeapRegion* hr);
// Incremental CSet Support
// The head of the incrementally built collection set.
HeapRegion* inc_cset_head() { return _inc_cset_head; }
// The tail of the incrementally built collection set.
HeapRegion* inc_set_tail() { return _inc_cset_tail; }
// The number of elements in the incrementally built collection set.
size_t inc_cset_size() { return _inc_cset_size; }
// Initialize incremental collection set info.
void start_incremental_cset_building();
void clear_incremental_cset() {
_inc_cset_head = NULL;
_inc_cset_tail = NULL;
}
// Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
// Add/remove information about hr to the aggregated information
// for the incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
void remove_from_incremental_cset_info(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
private:
// Update the incremental cset information when adding a region
// (should not be called directly).
void add_region_to_incremental_cset_common(HeapRegion* hr);
public:
// Add hr to the LHS of the incremental collection set.
void add_region_to_incremental_cset_lhs(HeapRegion* hr);
// Add hr to the RHS of the incremental collection set.
void add_region_to_incremental_cset_rhs(HeapRegion* hr);
#ifndef PRODUCT
void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
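// A hedged lifecycle sketch for the incremental CSet API above; the
// ordering is inferred from the declarations and comments, not copied
// from this commit's .cpp changes:
//   start_incremental_cset_building();       // after a pause / full GC
//   add_region_to_incremental_cset_rhs(hr);  // as eden regions retire
//   update_incremental_cset_info(hr, new_rs_length); // RSet sampling
//   stop_incremental_cset_building();        // when the next pause starts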
bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; } bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; } void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; } void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
@ -1191,7 +1264,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
// If the estimate is less than desirable, resize if possible. // If the estimate is less than desirable, resize if possible.
void expand_if_possible(size_t numRegions); void expand_if_possible(size_t numRegions);
virtual void choose_collection_set(); virtual bool choose_collection_set();
virtual void record_collection_pause_start(double start_time_sec, virtual void record_collection_pause_start(double start_time_sec,
size_t start_used); size_t start_used);
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes, virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,12 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs) { bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
if (sh->collector_policy()->should_clear_all_soft_refs()) {
assert(clear_all_softrefs, "Policy should have been checked earlier");
}
#endif
// hook up weak ref data so it can be used during Mark-Sweep // hook up weak ref data so it can be used during Mark-Sweep
assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
assert(rp != NULL, "should be non-NULL"); assert(rp != NULL, "should be non-NULL");
@ -44,7 +50,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
// Increment the invocation count for the permanent generation, since it is // Increment the invocation count for the permanent generation, since it is
// implicitly collected whenever we do a full mark sweep collection. // implicitly collected whenever we do a full mark sweep collection.
SharedHeap* sh = SharedHeap::heap();
sh->perm_gen()->stat_record()->invocations++; sh->perm_gen()->stat_record()->invocations++;
bool marked_for_unloading = false; bool marked_for_unloading = false;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,6 @@
#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \ #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
\ \
product(intx, G1ParallelGCAllocBufferSize, 8*K, \
"Size of parallel G1 allocation buffers in to-space.") \
\
product(intx, G1ConfidencePercent, 50, \ product(intx, G1ConfidencePercent, 50, \
"Confidence level for MMU/pause predictions") \ "Confidence level for MMU/pause predictions") \
\ \
@ -229,10 +226,6 @@
"the number of regions for which we'll print a surv rate " \ "the number of regions for which we'll print a surv rate " \
"summary.") \ "summary.") \
\ \
develop(bool, G1UseScanOnlyPrefix, false, \
"It determines whether the system will calculate an optimum " \
"scan-only set.") \
\
product(intx, G1ReservePercent, 10, \ product(intx, G1ReservePercent, 10, \
"It determines the minimum reserve we should have in the heap " \ "It determines the minimum reserve we should have in the heap " \
"to minimize the probability of promotion failure.") \ "to minimize the probability of promotion failure.") \
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -75,6 +75,16 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); }
void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
klassOop k = obj->klass();
const char* class_name = instanceKlass::cast(k)->external_name();
out->print_cr("class name %s", class_name);
#else // PRODUCT
obj->print_on(out);
#endif // PRODUCT
}
template <class T> void do_oop_work(T* p) { template <class T> void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition"); assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
@ -90,21 +100,29 @@ public:
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
} }
if (!_g1h->is_in_closed_subset(obj)) { if (!_g1h->is_in_closed_subset(obj)) {
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
gclog_or_tty->print_cr("Field "PTR_FORMAT gclog_or_tty->print_cr("Field "PTR_FORMAT
" of live obj "PTR_FORMAT " of live obj "PTR_FORMAT" in region "
" points to obj "PTR_FORMAT "["PTR_FORMAT", "PTR_FORMAT")",
" not in the heap.", p, (void*) _containing_obj,
p, (void*) _containing_obj, (void*) obj); from->bottom(), from->end());
print_object(gclog_or_tty, _containing_obj);
gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
(void*) obj);
} else { } else {
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
gclog_or_tty->print_cr("Field "PTR_FORMAT gclog_or_tty->print_cr("Field "PTR_FORMAT
" of live obj "PTR_FORMAT " of live obj "PTR_FORMAT" in region "
" points to dead obj "PTR_FORMAT".", "["PTR_FORMAT", "PTR_FORMAT")",
p, (void*) _containing_obj, (void*) obj); p, (void*) _containing_obj,
from->bottom(), from->end());
print_object(gclog_or_tty, _containing_obj);
gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
"["PTR_FORMAT", "PTR_FORMAT")",
(void*) obj, to->bottom(), to->end());
print_object(gclog_or_tty, obj);
} }
gclog_or_tty->print_cr("Live obj:");
_containing_obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("Bad referent:");
obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
_failures = true; _failures = true;
failed = true; failed = true;
@ -432,7 +450,9 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
_young_type(NotYoung), _next_young_region(NULL), _young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next_dirty_cards_region(NULL),
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _zfs(NotZeroFilled) _rem_set(NULL), _zfs(NotZeroFilled),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_predicted_bytes_to_copy(0)
{ {
_orig_end = mr.end(); _orig_end = mr.end();
// Note that initialize() will set the start of the unmarked area of the // Note that initialize() will set the start of the unmarked area of the
@ -715,7 +735,7 @@ void HeapRegion::print_on(outputStream* st) const {
else else
st->print(" "); st->print(" ");
if (is_young()) if (is_young())
st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y ")); st->print(is_survivor() ? " SU" : " Y ");
else else
st->print(" "); st->print(" ");
if (is_empty()) if (is_empty())
@ -723,6 +743,8 @@ void HeapRegion::print_on(outputStream* st) const {
else else
st->print(" "); st->print(" ");
st->print(" %5d", _gc_time_stamp); st->print(" %5d", _gc_time_stamp);
st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
prev_top_at_mark_start(), next_top_at_mark_start());
G1OffsetTableContigSpace::print_on(st); G1OffsetTableContigSpace::print_on(st);
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -247,7 +247,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
enum YoungType { enum YoungType {
NotYoung, // a region is not young NotYoung, // a region is not young
ScanOnly, // a region is young and scan-only
Young, // a region is young Young, // a region is young
Survivor // a region is young and it contains Survivor // a region is young and it contains
// survivor // survivor
@ -292,6 +291,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
_young_type = new_type; _young_type = new_type;
} }
// Cached attributes used in the collection set policy information
// The RSet length that was added to the total value
// for the collection set.
size_t _recorded_rs_length;
// The predicted elapsed time that was added to total value
// for the collection set.
double _predicted_elapsed_time_ms;
// The predicted number of bytes to copy that was added to
// the total value for the collection set.
size_t _predicted_bytes_to_copy;
public: public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
@ -614,7 +627,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// </PREDICTION> // </PREDICTION>
bool is_young() const { return _young_type != NotYoung; } bool is_young() const { return _young_type != NotYoung; }
bool is_scan_only() const { return _young_type == ScanOnly; }
bool is_survivor() const { return _young_type == Survivor; } bool is_survivor() const { return _young_type == Survivor; }
int young_index_in_cset() const { return _young_index_in_cset; } int young_index_in_cset() const { return _young_index_in_cset; }
@ -629,12 +641,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _surv_rate_group->age_in_group(_age_index); return _surv_rate_group->age_in_group(_age_index);
} }
void recalculate_age_in_surv_rate_group() {
assert( _surv_rate_group != NULL, "pre-condition" );
assert( _age_index > -1, "pre-condition" );
_age_index = _surv_rate_group->recalculate_age_index(_age_index);
}
void record_surv_words_in_group(size_t words_survived) { void record_surv_words_in_group(size_t words_survived) {
assert( _surv_rate_group != NULL, "pre-condition" ); assert( _surv_rate_group != NULL, "pre-condition" );
assert( _age_index > -1, "pre-condition" ); assert( _age_index > -1, "pre-condition" );
@ -676,8 +682,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_young() { set_young_type(Young); } void set_young() { set_young_type(Young); }
void set_scan_only() { set_young_type(ScanOnly); }
void set_survivor() { set_young_type(Survivor); } void set_survivor() { set_young_type(Survivor); }
void set_not_young() { set_young_type(NotYoung); } void set_not_young() { set_young_type(NotYoung); }
@ -775,6 +779,22 @@ class HeapRegion: public G1OffsetTableContigSpace {
_zero_filler = NULL; _zero_filler = NULL;
} }
size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
void set_recorded_rs_length(size_t rs_length) {
_recorded_rs_length = rs_length;
}
void set_predicted_elapsed_time_ms(double ms) {
_predicted_elapsed_time_ms = ms;
}
void set_predicted_bytes_to_copy(size_t bytes) {
_predicted_bytes_to_copy = bytes;
}
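// A hedged example of the sampling update these setters support,
// pairing the per-region cache with the aggregate kept by the policy
// (hypothetical call site; see update_incremental_cset_info() in
// g1CollectorPolicy.hpp):
//   size_t old_len = hr->recorded_rs_length();
//   hr->set_recorded_rs_length(new_len);
//   // policy aggregate: _inc_cset_recorded_rs_lengths += new_len - old_len;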
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
View File
@ -662,8 +662,6 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
prt = PosParPRT::alloc(from_hr); prt = PosParPRT::alloc(from_hr);
} }
prt->init(from_hr); prt->init(from_hr);
// Record the outgoing pointer in the from_region's outgoing bitmap.
from_hr->rem_set()->add_outgoing_reference(hr());
PosParPRT* first_prt = _fine_grain_regions[ind]; PosParPRT* first_prt = _fine_grain_regions[ind];
prt->set_next(first_prt); // XXX Maybe move to init? prt->set_next(first_prt); // XXX Maybe move to init?
@ -1073,11 +1071,7 @@ int HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr) HeapRegion* hr)
: _bosa(bosa), _other_regions(hr), : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
_outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
false /* in-resource-area */),
_iter_state(Unclaimed)
{}
void HeapRegionRemSet::setup_remset_size() { void HeapRegionRemSet::setup_remset_size() {
@ -1148,30 +1142,11 @@ void HeapRegionRemSet::par_cleanup() {
PosParPRT::par_contract_all(); PosParPRT::par_contract_all();
} }
void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
_outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
}
void HeapRegionRemSet::clear() { void HeapRegionRemSet::clear() {
clear_outgoing_entries();
_outgoing_region_map.clear();
_other_regions.clear(); _other_regions.clear();
assert(occupied() == 0, "Should be clear."); assert(occupied() == 0, "Should be clear.");
} }
void HeapRegionRemSet::clear_outgoing_entries() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
size_t i = _outgoing_region_map.get_next_one_offset(0);
while (i < _outgoing_region_map.size()) {
HeapRegion* to_region = g1h->region_at(i);
if (!to_region->in_collection_set()) {
to_region->rem_set()->clear_incoming_entry(hr());
}
i = _outgoing_region_map.get_next_one_offset(i+1);
}
}
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) { BitMap* region_bm, BitMap* card_bm) {
_other_regions.scrub(ctbs, region_bm, card_bm); _other_regions.scrub(ctbs, region_bm, card_bm);
View File
@ -179,13 +179,6 @@ private:
OtherRegionsTable _other_regions; OtherRegionsTable _other_regions;
// One set bit for every region that has an entry for this one.
BitMap _outgoing_region_map;
// Clear entries for the current region in any rem sets named in
// the _outgoing_region_map.
void clear_outgoing_entries();
enum ParIterState { Unclaimed, Claimed, Complete }; enum ParIterState { Unclaimed, Claimed, Complete };
volatile ParIterState _iter_state; volatile ParIterState _iter_state;
volatile jlong _iter_claimed; volatile jlong _iter_claimed;
@ -243,10 +236,6 @@ public:
_other_regions.add_reference(from, tid); _other_regions.add_reference(from, tid);
} }
// Records the fact that the current region contains an outgoing
// reference into "to_hr".
void add_outgoing_reference(HeapRegion* to_hr);
// Removes any entries shown by the given bitmaps to contain only dead // Removes any entries shown by the given bitmaps to contain only dead
// objects. // objects.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,6 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
void SurvRateGroup::reset() void SurvRateGroup::reset()
{ {
_all_regions_allocated = 0; _all_regions_allocated = 0;
_scan_only_prefix = 0;
_setup_seq_num = 0; _setup_seq_num = 0;
_stats_arrays_length = 0; _stats_arrays_length = 0;
_accum_surv_rate = 0.0; _accum_surv_rate = 0.0;
@ -74,7 +73,7 @@ void SurvRateGroup::reset()
void void
SurvRateGroup::start_adding_regions() { SurvRateGroup::start_adding_regions() {
_setup_seq_num = _stats_arrays_length; _setup_seq_num = _stats_arrays_length;
_region_num = _scan_only_prefix; _region_num = 0;
_accum_surv_rate = 0.0; _accum_surv_rate = 0.0;
#if 0 #if 0
@ -163,12 +162,6 @@ SurvRateGroup::next_age_index() {
return (int) ++_all_regions_allocated; return (int) ++_all_regions_allocated;
} }
void
SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
guarantee( scan_only_prefix <= _region_num, "pre-condition" );
_scan_only_prefix = scan_only_prefix;
}
void void
SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) { SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num, guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
@ -218,13 +211,12 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
#ifndef PRODUCT #ifndef PRODUCT
void void
SurvRateGroup::print() { SurvRateGroup::print() {
gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)", gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
_name, _region_num, _scan_only_prefix); _name, _region_num);
for (size_t i = 0; i < _region_num; ++i) { for (size_t i = 0; i < _region_num; ++i) {
gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s", gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%",
i, _surv_rate[i] * 100.0, i, _surv_rate[i] * 100.0,
_g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0, _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
(i < _scan_only_prefix) ? " S-O" : " ");
} }
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@ private:
int _all_regions_allocated; int _all_regions_allocated;
size_t _region_num; size_t _region_num;
size_t _scan_only_prefix;
size_t _setup_seq_num; size_t _setup_seq_num;
public: public:
@ -51,13 +50,11 @@ public:
void reset(); void reset();
void start_adding_regions(); void start_adding_regions();
void stop_adding_regions(); void stop_adding_regions();
void record_scan_only_prefix(size_t scan_only_prefix);
void record_surviving_words(int age_in_group, size_t surv_words); void record_surviving_words(int age_in_group, size_t surv_words);
void all_surviving_words_recorded(bool propagate); void all_surviving_words_recorded(bool propagate);
const char* name() { return _name; } const char* name() { return _name; }
size_t region_num() { return _region_num; } size_t region_num() { return _region_num; }
size_t scan_only_length() { return _scan_only_prefix; }
double accum_surv_rate_pred(int age) { double accum_surv_rate_pred(int age) {
assert(age >= 0, "must be"); assert(age >= 0, "must be");
if ((size_t)age < _stats_arrays_length) if ((size_t)age < _stats_arrays_length)
@ -82,17 +79,12 @@ public:
int next_age_index(); int next_age_index();
int age_in_group(int age_index) { int age_in_group(int age_index) {
int ret = (int) (_all_regions_allocated - age_index); int ret = (int) (_all_regions_allocated - age_index);
assert( ret >= 0, "invariant" ); assert( ret >= 0, "invariant" );
return ret; return ret;
} }
int recalculate_age_index(int age_index) {
int new_age_index = (int) _scan_only_prefix - age_in_group(age_index);
guarantee( new_age_index >= 0, "invariant" );
return new_age_index;
}
void finished_recalculating_age_indexes() { void finished_recalculating_age_indexes() {
_all_regions_allocated = (int) _scan_only_prefix; _all_regions_allocated = 0;
} }
#ifndef PRODUCT #ifndef PRODUCT
View File
@ -1,5 +1,5 @@
// //
// Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. // Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -161,8 +161,10 @@ parMarkBitMap.cpp psParallelCompact.hpp
parMarkBitMap.hpp bitMap.inline.hpp parMarkBitMap.hpp bitMap.inline.hpp
parMarkBitMap.hpp psVirtualspace.hpp parMarkBitMap.hpp psVirtualspace.hpp
psAdaptiveSizePolicy.cpp collectorPolicy.hpp
psAdaptiveSizePolicy.cpp gcPolicyCounters.hpp psAdaptiveSizePolicy.cpp gcPolicyCounters.hpp
psAdaptiveSizePolicy.cpp gcCause.hpp psAdaptiveSizePolicy.cpp gcCause.hpp
psAdaptiveSizePolicy.cpp generationSizer.hpp
psAdaptiveSizePolicy.cpp psAdaptiveSizePolicy.hpp psAdaptiveSizePolicy.cpp psAdaptiveSizePolicy.hpp
psAdaptiveSizePolicy.cpp psGCAdaptivePolicyCounters.hpp psAdaptiveSizePolicy.cpp psGCAdaptivePolicyCounters.hpp
psAdaptiveSizePolicy.cpp psScavenge.hpp psAdaptiveSizePolicy.cpp psScavenge.hpp
@ -215,6 +217,7 @@ psMarkSweep.cpp events.hpp
psMarkSweep.cpp fprofiler.hpp psMarkSweep.cpp fprofiler.hpp
psMarkSweep.cpp gcCause.hpp psMarkSweep.cpp gcCause.hpp
psMarkSweep.cpp gcLocker.inline.hpp psMarkSweep.cpp gcLocker.inline.hpp
psMarkSweep.cpp generationSizer.hpp
psMarkSweep.cpp isGCActiveMark.hpp psMarkSweep.cpp isGCActiveMark.hpp
psMarkSweep.cpp oop.inline.hpp psMarkSweep.cpp oop.inline.hpp
psMarkSweep.cpp memoryService.hpp psMarkSweep.cpp memoryService.hpp
@ -256,6 +259,7 @@ psParallelCompact.cpp fprofiler.hpp
psParallelCompact.cpp gcCause.hpp psParallelCompact.cpp gcCause.hpp
psParallelCompact.cpp gcLocker.inline.hpp psParallelCompact.cpp gcLocker.inline.hpp
psParallelCompact.cpp gcTaskManager.hpp psParallelCompact.cpp gcTaskManager.hpp
psParallelCompact.cpp generationSizer.hpp
psParallelCompact.cpp isGCActiveMark.hpp psParallelCompact.cpp isGCActiveMark.hpp
psParallelCompact.cpp management.hpp psParallelCompact.cpp management.hpp
psParallelCompact.cpp memoryService.hpp psParallelCompact.cpp memoryService.hpp
@ -344,10 +348,12 @@ psPromotionLAB.hpp objectStartArray.hpp
psScavenge.cpp psAdaptiveSizePolicy.hpp psScavenge.cpp psAdaptiveSizePolicy.hpp
psScavenge.cpp biasedLocking.hpp psScavenge.cpp biasedLocking.hpp
psScavenge.cpp cardTableExtension.hpp psScavenge.cpp cardTableExtension.hpp
psScavenge.cpp collectorPolicy.hpp
psScavenge.cpp fprofiler.hpp psScavenge.cpp fprofiler.hpp
psScavenge.cpp gcCause.hpp psScavenge.cpp gcCause.hpp
psScavenge.cpp gcLocker.inline.hpp psScavenge.cpp gcLocker.inline.hpp
psScavenge.cpp gcTaskManager.hpp psScavenge.cpp gcTaskManager.hpp
psScavenge.cpp generationSizer.hpp
psScavenge.cpp handles.inline.hpp psScavenge.cpp handles.inline.hpp
psScavenge.cpp isGCActiveMark.hpp psScavenge.cpp isGCActiveMark.hpp
psScavenge.cpp oop.inline.hpp psScavenge.cpp oop.inline.hpp
View File
@ -1,5 +1,5 @@
// //
// Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. // Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@ adaptiveSizePolicy.hpp allocation.hpp
adaptiveSizePolicy.hpp universe.hpp adaptiveSizePolicy.hpp universe.hpp
adaptiveSizePolicy.cpp adaptiveSizePolicy.hpp adaptiveSizePolicy.cpp adaptiveSizePolicy.hpp
adaptiveSizePolicy.cpp collectorPolicy.hpp
adaptiveSizePolicy.cpp gcCause.hpp adaptiveSizePolicy.cpp gcCause.hpp
adaptiveSizePolicy.cpp ostream.hpp adaptiveSizePolicy.cpp ostream.hpp
adaptiveSizePolicy.cpp timer.hpp adaptiveSizePolicy.cpp timer.hpp
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -892,6 +892,10 @@ void ParNewGeneration::collect(bool full,
} }
swap_spaces(); swap_spaces();
// A successful scavenge should restart the GC overhead limit count,
// which applies to full GCs.
size_policy->reset_gc_overhead_limit_count();
assert(to()->is_empty(), "to space should be empty now"); assert(to()->is_empty(), "to space should be empty now");
} else { } else {
assert(HandlePromotionFailure, assert(HandlePromotionFailure,
View File
@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -54,15 +54,16 @@ jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize(); CollectedHeap::pre_initialize();
// Cannot be initialized until after the flags are parsed // Cannot be initialized until after the flags are parsed
GenerationSizer flag_parser; // GenerationSizer flag_parser;
_collector_policy = new GenerationSizer();
size_t yg_min_size = flag_parser.min_young_gen_size(); size_t yg_min_size = _collector_policy->min_young_gen_size();
size_t yg_max_size = flag_parser.max_young_gen_size(); size_t yg_max_size = _collector_policy->max_young_gen_size();
size_t og_min_size = flag_parser.min_old_gen_size(); size_t og_min_size = _collector_policy->min_old_gen_size();
size_t og_max_size = flag_parser.max_old_gen_size(); size_t og_max_size = _collector_policy->max_old_gen_size();
// Why isn't there a min_perm_gen_size()? // Why isn't there a min_perm_gen_size()?
size_t pg_min_size = flag_parser.perm_gen_size(); size_t pg_min_size = _collector_policy->perm_gen_size();
size_t pg_max_size = flag_parser.max_perm_gen_size(); size_t pg_max_size = _collector_policy->max_perm_gen_size();
trace_gen_sizes("ps heap raw", trace_gen_sizes("ps heap raw",
pg_min_size, pg_max_size, pg_min_size, pg_max_size,
@ -89,12 +90,14 @@ jint ParallelScavengeHeap::initialize() {
// move to the common code. // move to the common code.
yg_min_size = align_size_up(yg_min_size, yg_align); yg_min_size = align_size_up(yg_min_size, yg_align);
yg_max_size = align_size_up(yg_max_size, yg_align); yg_max_size = align_size_up(yg_max_size, yg_align);
size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align); size_t yg_cur_size =
align_size_up(_collector_policy->young_gen_size(), yg_align);
yg_cur_size = MAX2(yg_cur_size, yg_min_size); yg_cur_size = MAX2(yg_cur_size, yg_min_size);
og_min_size = align_size_up(og_min_size, og_align); og_min_size = align_size_up(og_min_size, og_align);
og_max_size = align_size_up(og_max_size, og_align); og_max_size = align_size_up(og_max_size, og_align);
size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align); size_t og_cur_size =
align_size_up(_collector_policy->old_gen_size(), og_align);
og_cur_size = MAX2(og_cur_size, og_min_size); og_cur_size = MAX2(og_cur_size, og_min_size);
pg_min_size = align_size_up(pg_min_size, pg_align); pg_min_size = align_size_up(pg_min_size, pg_align);
@ -355,6 +358,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
// In general gc_overhead_limit_was_exceeded should be false, so
// set it to false here and reset it to true only if the gc overhead
// limit is being exceeded, as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = young_gen()->allocate(size, is_tlab); HeapWord* result = young_gen()->allocate(size, is_tlab);
uint loop_count = 0; uint loop_count = 0;
@ -428,24 +436,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (result == NULL) { if (result == NULL) {
// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above (result must be NULL),
// and the most recent collection must have exceeded the
// gc time limit. Exit the loop so that an out-of-memory
// will be thrown (returning a NULL will do that), but
// clear gc_time_limit_exceeded so that the next collection
// will succeed if the application decides to handle the
// out-of-memory and tries to go on.
*gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
if (size_policy()->gc_time_limit_exceeded()) {
size_policy()->set_gc_time_limit_exceeded(false);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
"return NULL because gc_time_limit_exceeded is set");
}
return NULL;
}
// Generate a VM operation // Generate a VM operation
VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count); VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
VMThread::execute(&op); VMThread::execute(&op);
@ -463,16 +453,34 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
assert(op.result() == NULL, "must be NULL if gc_locked() is true"); assert(op.result() == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary continue; // retry and/or stall as necessary
} }
// If a NULL result is being returned, an out-of-memory
// will be thrown now. Clear the gc_time_limit_exceeded // Exit the loop if the gc time limit has been exceeded.
// flag to avoid the following situation. // The allocation must have failed above ("result" guarding
// gc_time_limit_exceeded is set during a collection // this path is NULL) and the most recent collection has exceeded the
// the collection fails to return enough space and an OOM is thrown // gc overhead limit (although enough may have been collected to
// the next GC is skipped because the gc_time_limit_exceeded // satisfy the allocation). Exit the loop so that an out-of-memory
// flag is set and another OOM is thrown // will be thrown (return a NULL ignoring the contents of
if (op.result() == NULL) { // op.result()),
size_policy()->set_gc_time_limit_exceeded(false); // but clear gc_overhead_limit_exceeded so that the next collection
// starts with a clean slate (i.e., forgets about previous overhead
// excesses). Fill op.result() with a filler object so that the
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
"return NULL because gc_overhead_limit_exceeded is set");
}
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
} }
return op.result(); return op.result();
} }
} }
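
The exit path above hinges on a two-flag handshake: the allocation may only give up (and let an OutOfMemoryError be thrown) once the overhead limit has tripped and the preceding GC already cleared all SoftReferences; the assert documents that the first cannot happen without the second. A compilable sketch of that decision, with hypothetical stand-ins for the policy objects:

// Hypothetical stand-ins for size_policy() / collector_policy() state.
struct SizePolicyState      { bool overhead_limit_exceeded; };
struct CollectorPolicyState { bool all_soft_refs_clear; };

// Only report OOM once the limit tripped AND soft refs were cleared first,
// so the application never sees an OOM while clearable SoftReferences remain.
void* after_failed_gc(void* gc_result, SizePolicyState& sp,
                      CollectorPolicyState& cp,
                      bool& gc_overhead_limit_was_exceeded) {
  const bool limit_exceeded = sp.overhead_limit_exceeded;
  const bool softrefs_clear = cp.all_soft_refs_clear;
  if (limit_exceeded && softrefs_clear) {
    gc_overhead_limit_was_exceeded = true;
    sp.overhead_limit_exceeded = false;  // clean slate for the next GC
    // In the real code, a non-NULL op.result() is overwritten with a
    // filler object here so the heap stays parsable; the space is dropped.
    return nullptr;                      // caller throws OutOfMemoryError
  }
  return gc_result;                      // normal case: hand out the space
}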
@ -613,14 +621,15 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
// and the most recent collection must have exceeded the // and the most recent collection must have exceeded the
// gc time limit. Exit the loop so that an out-of-memory // gc time limit. Exit the loop so that an out-of-memory
// will be thrown (returning a NULL will do that), but // will be thrown (returning a NULL will do that), but
// clear gc_time_limit_exceeded so that the next collection // clear gc_overhead_limit_exceeded so that the next collection
// will succeed if the application decides to handle the // will succeed if the application decides to handle the
// out-of-memory and tries to go on. // out-of-memory and tries to go on.
if (size_policy()->gc_time_limit_exceeded()) { const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
size_policy()->set_gc_time_limit_exceeded(false); if (limit_exceeded) {
size_policy()->set_gc_overhead_limit_exceeded(false);
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: " gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
"return NULL because gc_time_limit_exceeded is set"); " return NULL because gc_overhead_limit_exceeded is set");
} }
assert(result == NULL, "Allocation did not fail"); assert(result == NULL, "Allocation did not fail");
return NULL; return NULL;
@ -643,14 +652,15 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
continue; // retry and/or stall as necessary continue; // retry and/or stall as necessary
} }
// If a NULL result is being returned, an out-of-memory // If a NULL result is being returned, an out-of-memory
// will be thrown now. Clear the gc_time_limit_exceeded // will be thrown now. Clear the gc_overhead_limit_exceeded
// flag to avoid the following situation. // flag to avoid the following situation.
// gc_time_limit_exceeded is set during a collection // gc_overhead_limit_exceeded is set during a collection
// the collection fails to return enough space and an OOM is thrown // the collection fails to return enough space and an OOM is thrown
// the next GC is skipped because the gc_time_limit_exceeded // a subsequent GC prematurely throws an out-of-memory because
// flag is set and another OOM is thrown // the gc_overhead_limit_exceeded counts did not start
// again from 0.
if (op.result() == NULL) { if (op.result() == NULL) {
size_policy()->set_gc_time_limit_exceeded(false); size_policy()->reset_gc_overhead_limit_count();
} }
return op.result(); return op.result();
} }
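
Note the asymmetry between the two paths: the ordinary allocation path clears the decision flag with set_gc_overhead_limit_exceeded(false) but keeps the consecutive count, while the perm-gen path calls reset_gc_overhead_limit_count() so the streak restarts from zero (the overhead test itself never examines perm-gen free space, as the rewritten comment explains). A tiny sketch of the two resets, under assumed names:

#include <cassert>

struct GcOverheadState {
  bool exceeded;    // decision flag: arm an OOM on the next failed allocation
  unsigned count;   // consecutive over-limit full GCs

  void clear_flag()  { exceeded = false; }  // set_gc_overhead_limit_exceeded(false)
  void reset_count() { count = 0; }         // reset_gc_overhead_limit_count()
};

int main() {
  GcOverheadState s = { true, 3 };
  s.clear_flag();   // ordinary allocation path: streak survives
  assert(!s.exceeded && s.count == 3);
  s.reset_count();  // perm-gen path: forget previous overhead excesses
  assert(s.count == 0);
  return 0;
}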

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
class AdjoiningGenerations; class AdjoiningGenerations;
class GCTaskManager; class GCTaskManager;
class PSAdaptiveSizePolicy; class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;
class ParallelScavengeHeap : public CollectedHeap { class ParallelScavengeHeap : public CollectedHeap {
friend class VMStructs; friend class VMStructs;
@ -43,6 +45,8 @@ class ParallelScavengeHeap : public CollectedHeap {
size_t _young_gen_alignment; size_t _young_gen_alignment;
size_t _old_gen_alignment; size_t _old_gen_alignment;
GenerationSizer* _collector_policy;
inline size_t set_alignment(size_t& var, size_t val); inline size_t set_alignment(size_t& var, size_t val);
// Collection of generations that are adjacent in the // Collection of generations that are adjacent in the
@ -72,6 +76,9 @@ class ParallelScavengeHeap : public CollectedHeap {
return CollectedHeap::ParallelScavengeHeap; return CollectedHeap::ParallelScavengeHeap;
} }
CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
// GenerationSizer* collector_policy() const { return _collector_policy; }
static PSYoungGen* young_gen() { return _young_gen; } static PSYoungGen* young_gen() { return _young_gen; }
static PSOldGen* old_gen() { return _old_gen; } static PSOldGen* old_gen() { return _old_gen; }
static PSPermGen* perm_gen() { return _perm_gen; } static PSPermGen* perm_gen() { return _perm_gen; }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -184,18 +184,19 @@ void PSAdaptiveSizePolicy::clear_generation_free_space_flags() {
set_change_young_gen_for_maj_pauses(0); set_change_young_gen_for_maj_pauses(0);
} }
// If this is not a full GC, only test and modify the young generation. // If this is not a full GC, only test and modify the young generation.
void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live, void PSAdaptiveSizePolicy::compute_generation_free_space(
size_t eden_live, size_t young_live,
size_t old_live, size_t eden_live,
size_t perm_live, size_t old_live,
size_t cur_eden, size_t perm_live,
size_t max_old_gen_size, size_t cur_eden,
size_t max_eden_size, size_t max_old_gen_size,
bool is_full_gc, size_t max_eden_size,
GCCause::Cause gc_cause) { bool is_full_gc,
GCCause::Cause gc_cause,
CollectorPolicy* collector_policy) {
// Update statistics // Update statistics
// Time statistics are updated as we go, update footprint stats here // Time statistics are updated as we go, update footprint stats here
@ -380,91 +381,16 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
// Is too much time being spent in GC? // Is too much time being spent in GC?
// Is the heap trying to grow beyond its limits? // Is the heap trying to grow beyond its limits?
const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average()); const size_t free_in_old_gen =
(size_t)(max_old_gen_size - avg_old_live()->average());
if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) { if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
check_gc_overhead_limit(young_live,
// eden_limit is the upper limit on the size of eden based on eden_live,
// the maximum size of the young generation and the sizes max_old_gen_size,
// of the survivor space. max_eden_size,
// The question being asked is whether the gc costs are high is_full_gc,
// and the space being recovered by a collection is low. gc_cause,
// free_in_young_gen is the free space in the young generation collector_policy);
// after a collection and promo_live is the free space in the old
// generation after a collection.
//
// Use the minimum of the current value of the live in the
// young gen or the average of the live in the young gen.
// If the current value drops quickly, that should be taken
// into account (i.e., don't trigger if the amount of free
// space has suddenly jumped up). If the current is much
// higher than the average, use the average since it represents
// the longer term behavior.
const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average());
const size_t free_in_eden = eden_limit > live_in_eden ?
eden_limit - live_in_eden : 0;
const size_t total_free_limit = free_in_old_gen + free_in_eden;
const size_t total_mem = max_old_gen_size + max_eden_size;
const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
if (PrintAdaptiveSizePolicy && (Verbose ||
(total_free_limit < (size_t) mem_free_limit))) {
gclog_or_tty->print_cr(
"PSAdaptiveSizePolicy::compute_generation_free_space limits:"
" promo_limit: " SIZE_FORMAT
" eden_limit: " SIZE_FORMAT
" total_free_limit: " SIZE_FORMAT
" max_old_gen_size: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" mem_free_limit: " SIZE_FORMAT,
promo_limit, eden_limit, total_free_limit,
max_old_gen_size, max_eden_size,
(size_t) mem_free_limit);
}
if (is_full_gc) {
if (gc_cost() > gc_cost_limit &&
total_free_limit < (size_t) mem_free_limit) {
// Collections, on average, are taking too much time, and
// gc_cost() > gc_cost_limit
// we have too little space available after a full gc.
// total_free_limit < mem_free_limit
// where
// total_free_limit is the free space available in
// both generations
// total_mem is the total space available for allocation
// in both generations (survivor spaces are not included
// just as they are not included in eden_limit).
// mem_free_limit is a fraction of total_mem judged to be an
// acceptable amount that is still unused.
// The heap can ask for the value of this variable when deciding
// whether to throw an OutOfMemory error.
// Note that the gc time limit test only works for the collections
// of the young gen + tenured gen and not for collections of the
// permanent gen. That is because the calculation of the space
// freed by the collection is the free space in the young gen +
// tenured gen.
// Ignore explicit GC's. Ignoring explicit GC's at this level
// is the equivalent of the GC not having happened as far as the
// overhead calculation is concerned (i.e., the flag is not set
// and the count is not affected). Also the average will not
// have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
if (!GCCause::is_user_requested_gc(gc_cause) &&
!GCCause::is_serviceability_requested_gc(gc_cause)) {
inc_gc_time_limit_count();
if (UseGCOverheadLimit &&
(gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
// All conditions have been met for throwing an out-of-memory
_gc_time_limit_exceeded = true;
// Avoid consecutive OOM due to the gc time limit by resetting
// the counter.
reset_gc_time_limit_count();
}
_print_gc_time_limit_would_be_exceeded = true;
}
} else {
// Did not exceed overhead limits
reset_gc_time_limit_count();
}
}
} }
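
After the refactor, compute_generation_free_space only decides whether the overhead check is worth running; the check itself lives in the shared AdaptiveSizePolicy. The guard, roughly, is "both generations want more space than is actually free". A hedged sketch of that predicate (parameter names are descriptive, not the originals):

#include <cstddef>

// True when the desired promo/eden sizes both exceed what is free, i.e.
// the heap is trying to grow beyond its limits and the overhead-limit
// bookkeeping should run. (The original subtracts the decayed average of
// old-gen live data without an underflow guard; one is added here.)
bool should_run_overhead_check(size_t desired_promo_size,
                               size_t desired_eden_size,
                               size_t max_old_gen_size,
                               size_t avg_old_live,
                               size_t eden_limit) {
  const size_t free_in_old_gen =
      max_old_gen_size > avg_old_live ? max_old_gen_size - avg_old_live : 0;
  return desired_promo_size > free_in_old_gen &&
         desired_eden_size  > eden_limit;
}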

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,7 @@
// Forward decls // Forward decls
class elapsedTimer; class elapsedTimer;
class GenerationSizer;
class PSAdaptiveSizePolicy : public AdaptiveSizePolicy { class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
friend class PSGCAdaptivePolicyCounters; friend class PSGCAdaptivePolicyCounters;
@ -340,7 +341,8 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
size_t max_old_gen_size, size_t max_old_gen_size,
size_t max_eden_size, size_t max_eden_size,
bool is_full_gc, bool is_full_gc,
GCCause::Cause gc_cause); GCCause::Cause gc_cause,
CollectorPolicy* collector_policy);
// Calculates new survivor space size; returns a new tenuring threshold // Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size. // value. Stores new survivor size in _survivor_size.

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -117,11 +117,13 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK); PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded"); cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
_gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname, _gc_overhead_limit_exceeded_counter =
PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK); PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc"); cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
_live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname, _live_at_last_full_gc_counter =
PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK); PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope"); cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
@ -189,6 +191,8 @@ void PSGCAdaptivePolicyCounters::update_counters_from_policy() {
update_minor_pause_old_slope(); update_minor_pause_old_slope();
update_major_pause_young_slope(); update_major_pause_young_slope();
update_minor_collection_slope_counter(); update_minor_collection_slope_counter();
update_gc_overhead_limit_exceeded_counter();
update_live_at_last_full_gc_counter();
} }
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,8 +44,8 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
PerfVariable* _live_space; PerfVariable* _live_space;
PerfVariable* _free_space; PerfVariable* _free_space;
PerfVariable* _avg_base_footprint; PerfVariable* _avg_base_footprint;
PerfVariable* _gc_time_limit_exceeded; PerfVariable* _gc_overhead_limit_exceeded_counter;
PerfVariable* _live_at_last_full_gc; PerfVariable* _live_at_last_full_gc_counter;
PerfVariable* _old_capacity; PerfVariable* _old_capacity;
PerfVariable* _boundary_moved; PerfVariable* _boundary_moved;
@ -169,6 +169,14 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
(jlong)(ps_size_policy()->major_pause_young_slope() * 1000) (jlong)(ps_size_policy()->major_pause_young_slope() * 1000)
); );
} }
inline void update_gc_overhead_limit_exceeded_counter() {
_gc_overhead_limit_exceeded_counter->set_value(
(jlong) ps_size_policy()->gc_overhead_limit_exceeded());
}
inline void update_live_at_last_full_gc_counter() {
_live_at_last_full_gc_counter->set_value(
(jlong)(ps_size_policy()->live_at_last_full_gc()));
}
inline void update_scavenge_skipped(int cause) { inline void update_scavenge_skipped(int cause) {
_scavenge_skipped->set_value(cause); _scavenge_skipped->set_value(cause);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -46,6 +46,12 @@ void PSMarkSweep::initialize() {
// //
// Note that this method should only be called from the vm_thread while // Note that this method should only be called from the vm_thread while
// at a safepoint! // at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.
void PSMarkSweep::invoke(bool maximum_heap_compaction) { void PSMarkSweep::invoke(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
@ -54,24 +60,18 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
GCCause::Cause gc_cause = heap->gc_cause(); GCCause::Cause gc_cause = heap->gc_cause();
PSAdaptiveSizePolicy* policy = heap->size_policy(); PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
// Before each allocation/collection attempt, find out from the if (ScavengeBeforeFullGC) {
// policy object if GCs are, on the whole, taking too long. If so, PSScavenge::invoke_no_policy();
// bail out without attempting a collection. The exceptions are
// for explicitly requested GC's.
if (!policy->gc_time_limit_exceeded() ||
GCCause::is_user_requested_gc(gc_cause) ||
GCCause::is_serviceability_requested_gc(gc_cause)) {
IsGCActiveMark mark;
if (ScavengeBeforeFullGC) {
PSScavenge::invoke_no_policy();
}
int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
} }
const bool clear_all_soft_refs =
heap->collector_policy()->should_clear_all_soft_refs();
int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
} }
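
The behavioral change here is easy to miss in the diff: the old invoke() refused to run a full GC at all once the time limit was exceeded (except for explicit GCs); the new one always collects, and instead widens the collection by folding the policy's pending soft-ref request into the clear_all_softrefs argument. A simplified sketch of the new shape, with printable stand-ins for the real calls:

#include <cstdio>

struct PolicyState { bool should_clear_all_soft_refs; };

// Reworked invoke(): no early bail-out on the overhead limit; the
// policy's soft-ref decision is OR-ed into the compaction request.
void invoke_full_gc(bool maximum_heap_compaction,
                    bool scavenge_before_full_gc,
                    const PolicyState& policy) {
  if (scavenge_before_full_gc) {
    std::puts("scavenge");  // PSScavenge::invoke_no_policy()
  }
  const bool clear_all_soft_refs = policy.should_clear_all_soft_refs;
  std::printf("mark-sweep, clear_all_softrefs=%d\n",
              clear_all_soft_refs || maximum_heap_compaction);
}

int main() {
  PolicyState p = { true };
  invoke_full_gc(false, true, p);  // collects and clears soft refs
  return 0;
}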
// This method contains no policy. You should probably // This method contains no policy. You should probably
@ -89,6 +89,10 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSAdaptiveSizePolicy* size_policy = heap->size_policy(); PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
PSYoungGen* young_gen = heap->young_gen(); PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen(); PSOldGen* old_gen = heap->old_gen();
PSPermGen* perm_gen = heap->perm_gen(); PSPermGen* perm_gen = heap->perm_gen();
@ -275,7 +279,8 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
old_gen->max_gen_size(), old_gen->max_gen_size(),
max_eden_size, max_eden_size,
true /* full gc*/, true /* full gc*/,
gc_cause); gc_cause,
heap->collector_policy());
heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
@ -326,19 +331,6 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
// Track memory usage and detect low memory // Track memory usage and detect low memory
MemoryService::track_memory_usage(); MemoryService::track_memory_usage();
heap->update_counters(); heap->update_counters();
if (PrintGCDetails) {
if (size_policy->print_gc_time_limit_would_be_exceeded()) {
if (size_policy->gc_time_limit_exceeded()) {
gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
"of %d%%", GCTimeLimit);
} else {
gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
"of %d%%", GCTimeLimit);
}
}
size_policy->set_print_gc_time_limit_would_be_exceeded(false);
}
} }
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1923,31 +1923,32 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
// //
// Note that this method should only be called from the vm_thread while at a // Note that this method should only be called from the vm_thread while at a
// safepoint. // safepoint.
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.
void PSParallelCompact::invoke(bool maximum_heap_compaction) { void PSParallelCompact::invoke(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), assert(Thread::current() == (Thread*)VMThread::vm_thread(),
"should be in vm thread"); "should be in vm thread");
ParallelScavengeHeap* heap = gc_heap(); ParallelScavengeHeap* heap = gc_heap();
GCCause::Cause gc_cause = heap->gc_cause(); GCCause::Cause gc_cause = heap->gc_cause();
assert(!heap->is_gc_active(), "not reentrant"); assert(!heap->is_gc_active(), "not reentrant");
PSAdaptiveSizePolicy* policy = heap->size_policy(); PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
// Before each allocation/collection attempt, find out from the if (ScavengeBeforeFullGC) {
// policy object if GCs are, on the whole, taking too long. If so, PSScavenge::invoke_no_policy();
// bail out without attempting a collection. The exceptions are
// for explicitly requested GC's.
if (!policy->gc_time_limit_exceeded() ||
GCCause::is_user_requested_gc(gc_cause) ||
GCCause::is_serviceability_requested_gc(gc_cause)) {
IsGCActiveMark mark;
if (ScavengeBeforeFullGC) {
PSScavenge::invoke_no_policy();
}
PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
} }
const bool clear_all_soft_refs =
heap->collector_policy()->should_clear_all_soft_refs();
PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
maximum_heap_compaction);
} }
bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) { bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
@ -1976,6 +1977,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
PSPermGen* perm_gen = heap->perm_gen(); PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy(); PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(maximum_heap_compaction,
heap->collector_policy());
if (ZapUnusedHeapArea) { if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling // Save information needed to minimize mangling
heap->record_gen_tops_before_GC(); heap->record_gen_tops_before_GC();
@ -2109,7 +2115,8 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
old_gen->max_gen_size(), old_gen->max_gen_size(),
max_eden_size, max_eden_size,
true /* full gc*/, true /* full gc*/,
gc_cause); gc_cause,
heap->collector_policy());
heap->resize_old_gen( heap->resize_old_gen(
size_policy->calculated_old_free_size_in_bytes()); size_policy->calculated_old_free_size_in_bytes());
@ -2157,19 +2164,6 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// Track memory usage and detect low memory // Track memory usage and detect low memory
MemoryService::track_memory_usage(); MemoryService::track_memory_usage();
heap->update_counters(); heap->update_counters();
if (PrintGCDetails) {
if (size_policy->print_gc_time_limit_would_be_exceeded()) {
if (size_policy->gc_time_limit_exceeded()) {
gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
"of %d%%", GCTimeLimit);
} else {
gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
"of %d%%", GCTimeLimit);
}
}
size_policy->set_print_gc_time_limit_would_be_exceeded(false);
}
} }
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -187,8 +187,7 @@ void PSRefProcTaskExecutor::execute(EnqueueTask& task)
// //
// Note that this method should only be called from the vm_thread while // Note that this method should only be called from the vm_thread while
// at a safepoint! // at a safepoint!
void PSScavenge::invoke() void PSScavenge::invoke() {
{
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(!Universe::heap()->is_gc_active(), "not reentrant"); assert(!Universe::heap()->is_gc_active(), "not reentrant");
@ -197,29 +196,25 @@ void PSScavenge::invoke()
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
PSAdaptiveSizePolicy* policy = heap->size_policy(); PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
// Before each allocation/collection attempt, find out from the bool scavenge_was_done = PSScavenge::invoke_no_policy();
// policy object if GCs are, on the whole, taking too long. If so,
// bail out without attempting a collection.
if (!policy->gc_time_limit_exceeded()) {
IsGCActiveMark mark;
bool scavenge_was_done = PSScavenge::invoke_no_policy(); PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
if (UsePerfData)
PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); counters->update_full_follows_scavenge(0);
if (!scavenge_was_done ||
policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
if (UsePerfData) if (UsePerfData)
counters->update_full_follows_scavenge(0); counters->update_full_follows_scavenge(full_follows_scavenge);
if (!scavenge_was_done || GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
policy->should_full_GC(heap->old_gen()->free_in_bytes())) { CollectorPolicy* cp = heap->collector_policy();
if (UsePerfData) const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
counters->update_full_follows_scavenge(full_follows_scavenge);
GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); if (UseParallelOldGC) {
if (UseParallelOldGC) { PSParallelCompact::invoke_no_policy(clear_all_softrefs);
PSParallelCompact::invoke_no_policy(false); } else {
} else { PSMarkSweep::invoke_no_policy(clear_all_softrefs);
PSMarkSweep::invoke_no_policy(false);
}
} }
} }
} }
@ -447,6 +442,9 @@ bool PSScavenge::invoke_no_policy() {
size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
size_policy->update_averages(_survivor_overflow, survived, promoted); size_policy->update_averages(_survivor_overflow, survived, promoted);
// A successful scavenge should restart the GC time limit count, which is
// maintained for full GC's.
size_policy->reset_gc_overhead_limit_count();
if (UseAdaptiveSizePolicy) { if (UseAdaptiveSizePolicy) {
// Calculate the new survivor size and tenuring threshold // Calculate the new survivor size and tenuring threshold
@ -523,7 +521,8 @@ bool PSScavenge::invoke_no_policy() {
old_gen->max_gen_size(), old_gen->max_gen_size(),
max_eden_size, max_eden_size,
false /* full gc*/, false /* full gc*/,
gc_cause); gc_cause,
heap->collector_policy());
} }
// Resize the young generation at every collection // Resize the young generation at every collection

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,13 +44,15 @@ AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
_survivor_size(init_survivor_size), _survivor_size(init_survivor_size),
_gc_pause_goal_sec(gc_pause_goal_sec), _gc_pause_goal_sec(gc_pause_goal_sec),
_throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))), _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
_gc_time_limit_exceeded(false), _gc_overhead_limit_exceeded(false),
_print_gc_time_limit_would_be_exceeded(false), _print_gc_overhead_limit_would_be_exceeded(false),
_gc_time_limit_count(0), _gc_overhead_limit_count(0),
_latest_minor_mutator_interval_seconds(0), _latest_minor_mutator_interval_seconds(0),
_threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0), _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
_young_gen_change_for_minor_throughput(0), _young_gen_change_for_minor_throughput(0),
_old_gen_change_for_major_throughput(0) { _old_gen_change_for_major_throughput(0) {
assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
"No opportunity to clear SoftReferences before GC overhead limit");
_avg_minor_pause = _avg_minor_pause =
new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding); new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
_avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight); _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
@ -278,6 +280,147 @@ void AdaptiveSizePolicy::clear_generation_free_space_flags() {
set_decide_at_full_gc(0); set_decide_at_full_gc(0);
} }
void AdaptiveSizePolicy::check_gc_overhead_limit(
size_t young_live,
size_t eden_live,
size_t max_old_gen_size,
size_t max_eden_size,
bool is_full_gc,
GCCause::Cause gc_cause,
CollectorPolicy* collector_policy) {
// Ignore explicit GC's. Exiting here does not set the flag and
// does not reset the count. Updating of the averages for system
// GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
if (GCCause::is_user_requested_gc(gc_cause) ||
GCCause::is_serviceability_requested_gc(gc_cause)) {
return;
}
// eden_limit is the upper limit on the size of eden based on
// the maximum size of the young generation and the sizes
// of the survivor space.
// The question being asked is whether the gc costs are high
// and the space being recovered by a collection is low.
// free_in_young_gen is the free space in the young generation
// after a collection and promo_live is the free space in the old
// generation after a collection.
//
// Use the minimum of the current value of the live in the
// young gen or the average of the live in the young gen.
// If the current value drops quickly, that should be taken
// into account (i.e., don't trigger if the amount of free
// space has suddenly jumped up). If the current is much
// higher than the average, use the average since it represents
// the longer term behavior.
const size_t live_in_eden =
MIN2(eden_live, (size_t) avg_eden_live()->average());
const size_t free_in_eden = max_eden_size > live_in_eden ?
max_eden_size - live_in_eden : 0;
const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
const size_t total_free_limit = free_in_old_gen + free_in_eden;
const size_t total_mem = max_old_gen_size + max_eden_size;
const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
const double gc_cost_limit = GCTimeLimit/100.0;
size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
// But don't force a promo size below the current promo size. Otherwise,
// the promo size will shrink for no good reason.
promo_limit = MAX2(promo_limit, _promo_size);
if (PrintAdaptiveSizePolicy && (Verbose ||
(free_in_old_gen < (size_t) mem_free_old_limit &&
free_in_eden < (size_t) mem_free_eden_limit))) {
gclog_or_tty->print_cr(
"PSAdaptiveSizePolicy::compute_generation_free_space limits:"
" promo_limit: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" total_free_limit: " SIZE_FORMAT
" max_old_gen_size: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" mem_free_limit: " SIZE_FORMAT,
promo_limit, max_eden_size, total_free_limit,
max_old_gen_size, max_eden_size,
(size_t) mem_free_limit);
}
bool print_gc_overhead_limit_would_be_exceeded = false;
if (is_full_gc) {
if (gc_cost() > gc_cost_limit &&
free_in_old_gen < (size_t) mem_free_old_limit &&
free_in_eden < (size_t) mem_free_eden_limit) {
// Collections, on average, are taking too much time, and
// gc_cost() > gc_cost_limit
// we have too little space available after a full gc.
// total_free_limit < mem_free_limit
// where
// total_free_limit is the free space available in
// both generations
// total_mem is the total space available for allocation
// in both generations (survivor spaces are not included
// just as they are not included in eden_limit).
// mem_free_limit is a fraction of total_mem judged to be an
// acceptable amount that is still unused.
// The heap can ask for the value of this variable when deciding
// whether to throw an OutOfMemory error.
// Note that the gc time limit test only works for the collections
// of the young gen + tenured gen and not for collections of the
// permanent gen. That is because the calculation of the space
// freed by the collection is the free space in the young gen +
// tenured gen.
// At this point the GC overhead limit is being exceeded.
inc_gc_overhead_limit_count();
if (UseGCOverheadLimit) {
if (gc_overhead_limit_count() >=
AdaptiveSizePolicyGCTimeLimitThreshold){
// All conditions have been met for throwing an out-of-memory
set_gc_overhead_limit_exceeded(true);
// Avoid consecutive OOM due to the gc time limit by resetting
// the counter.
reset_gc_overhead_limit_count();
} else {
// The required consecutive collections which exceed the
// GC time limit may or may not have been reached. We
// are approaching that condition and so as not to
// throw an out-of-memory before all SoftRefs have been
// cleared, set _should_clear_all_soft_refs in CollectorPolicy.
// The clearing will be done on the next GC.
bool near_limit = gc_overhead_limit_near();
if (near_limit) {
collector_policy->set_should_clear_all_soft_refs(true);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" Nearing GC overhead limit, "
"will be clearing all SoftReference");
}
}
}
}
// Set this even when the overhead limit will not
// cause an out-of-memory. A diagnostic message indicating
// that the overhead limit is being exceeded is sometimes
// printed.
print_gc_overhead_limit_would_be_exceeded = true;
} else {
// Did not exceed overhead limits
reset_gc_overhead_limit_count();
}
}
if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
if (gc_overhead_limit_exceeded()) {
gclog_or_tty->print_cr(" GC is exceeding overhead limit "
"of %d%%", GCTimeLimit);
reset_gc_overhead_limit_count();
} else if (print_gc_overhead_limit_would_be_exceeded) {
assert(gc_overhead_limit_count() > 0, "Should not be printing");
gclog_or_tty->print_cr(" GC would exceed overhead limit "
"of %d%% %d consecutive time(s)",
GCTimeLimit, gc_overhead_limit_count());
}
}
}
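
Taken as a whole, the relocated check reduces to a small state machine: skip explicit GCs; on a full GC whose cost exceeds GCTimeLimit while both generations sit below their GCHeapFreeLimit share of free space, bump a consecutive counter; one collection short of the threshold, ask for SoftReferences to be cleared; at the threshold, arm the OOM flag and reset. A compilable, simplified model: flags and averages become plain parameters, the UseGCOverheadLimit gate is assumed on, and names are descriptive rather than HotSpot's:

#include <algorithm>
#include <cstddef>

struct OverheadDecision {
  bool arm_oom         = false;  // set_gc_overhead_limit_exceeded(true)
  bool clear_soft_refs = false;  // collector_policy->set_should_clear_all_soft_refs(true)
};

// Simplified model of AdaptiveSizePolicy::check_gc_overhead_limit().
// gc_cost is the fraction of time spent in GC (0..1); the two limits are
// percentages matching GCTimeLimit and GCHeapFreeLimit.
OverheadDecision check_overhead(bool is_full_gc, bool explicit_gc,
                                double gc_cost,
                                size_t eden_live, size_t avg_eden_live,
                                size_t max_eden_size,
                                size_t avg_old_live, size_t max_old_gen_size,
                                unsigned& consecutive_count,
                                unsigned threshold,        // e.g. 5
                                double gc_time_limit_pct,  // e.g. 98.0
                                double heap_free_limit_pct /* e.g. 2.0 */) {
  OverheadDecision d;
  if (explicit_gc || !is_full_gc) return d;  // System.gc() etc. are ignored

  // Use the smaller of the current and the averaged live-in-eden so a
  // sudden jump in free space does not trigger the limit.
  const size_t live_in_eden = std::min(eden_live, avg_eden_live);
  const size_t free_in_eden =
      max_eden_size > live_in_eden ? max_eden_size - live_in_eden : 0;
  const size_t free_in_old =
      max_old_gen_size > avg_old_live ? max_old_gen_size - avg_old_live : 0;

  const double eden_free_floor = max_eden_size    * (heap_free_limit_pct / 100.0);
  const double old_free_floor  = max_old_gen_size * (heap_free_limit_pct / 100.0);

  if (gc_cost > gc_time_limit_pct / 100.0 &&
      free_in_old  < (size_t)old_free_floor &&
      free_in_eden < (size_t)eden_free_floor) {
    ++consecutive_count;
    if (consecutive_count >= threshold) {
      d.arm_oom = true;            // all conditions met for an OOM
      consecutive_count = 0;       // avoid back-to-back OOMs
    } else if (consecutive_count >= threshold - 1) {
      d.clear_soft_refs = true;    // "near" the limit: clear SoftRefs first
    }
  } else {
    consecutive_count = 0;         // a healthy full GC breaks the streak
  }
  return d;
}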
// Printing // Printing
bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const { bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
// Forward decls // Forward decls
class elapsedTimer; class elapsedTimer;
class CollectorPolicy;
class AdaptiveSizePolicy : public CHeapObj { class AdaptiveSizePolicy : public CHeapObj {
friend class GCAdaptivePolicyCounters; friend class GCAdaptivePolicyCounters;
@ -75,13 +76,16 @@ class AdaptiveSizePolicy : public CHeapObj {
// This is a hint for the heap: we've detected that gc times // This is a hint for the heap: we've detected that gc times
// are taking longer than GCTimeLimit allows. // are taking longer than GCTimeLimit allows.
bool _gc_time_limit_exceeded; bool _gc_overhead_limit_exceeded;
// Use for diagnostics only. If UseGCTimeLimit is false, // Use for diagnostics only. If UseGCOverheadLimit is false,
// this variable is still set. // this variable is still set.
bool _print_gc_time_limit_would_be_exceeded; bool _print_gc_overhead_limit_would_be_exceeded;
// Count of consecutive GC that have exceeded the // Count of consecutive GC that have exceeded the
// GC time limit criterion. // GC time limit criterion.
uint _gc_time_limit_count; uint _gc_overhead_limit_count;
// This flag signals that GCTimeLimit is being exceeded
// but may not have done so for the required number of consecutive
// collections.
// Minor collection timers used to determine both // Minor collection timers used to determine both
// pause and interval times for collections. // pause and interval times for collections.
@ -406,22 +410,21 @@ class AdaptiveSizePolicy : public CHeapObj {
// Most heaps will choose to throw an OutOfMemoryError when // Most heaps will choose to throw an OutOfMemoryError when
// this occurs but it is up to the heap to request this information // this occurs but it is up to the heap to request this information
// of the policy // of the policy
bool gc_time_limit_exceeded() { bool gc_overhead_limit_exceeded() {
return _gc_time_limit_exceeded; return _gc_overhead_limit_exceeded;
} }
void set_gc_time_limit_exceeded(bool v) { void set_gc_overhead_limit_exceeded(bool v) {
_gc_time_limit_exceeded = v; _gc_overhead_limit_exceeded = v;
}
bool print_gc_time_limit_would_be_exceeded() {
return _print_gc_time_limit_would_be_exceeded;
}
void set_print_gc_time_limit_would_be_exceeded(bool v) {
_print_gc_time_limit_would_be_exceeded = v;
} }
uint gc_time_limit_count() { return _gc_time_limit_count; } // Tests whether conditions indicate the GC overhead limit is being approached.
void reset_gc_time_limit_count() { _gc_time_limit_count = 0; } bool gc_overhead_limit_near() {
void inc_gc_time_limit_count() { _gc_time_limit_count++; } return gc_overhead_limit_count() >=
(AdaptiveSizePolicyGCTimeLimitThreshold - 1);
}
uint gc_overhead_limit_count() { return _gc_overhead_limit_count; }
void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; }
void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; }
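
With the default AdaptiveSizePolicyGCTimeLimitThreshold of 5 (assumed here), gc_overhead_limit_near() turns true exactly one collection before the flag can be armed, which is what gives the policy a window to clear SoftReferences first:

#include <cassert>

const unsigned kThreshold = 5;  // assumed AdaptiveSizePolicyGCTimeLimitThreshold

bool near_limit(unsigned count) { return count >= kThreshold - 1; }

int main() {
  assert(!near_limit(3));
  assert(near_limit(4));  // SoftRefs are cleared one collection before the OOM
  assert(near_limit(5));
  return 0;
}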
// accessors for flags recording the decisions to resize the // accessors for flags recording the decisions to resize the
// generations to meet the pause goal. // generations to meet the pause goal.
@ -436,6 +439,16 @@ class AdaptiveSizePolicy : public CHeapObj {
int decide_at_full_gc() { return _decide_at_full_gc; } int decide_at_full_gc() { return _decide_at_full_gc; }
void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; } void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; }
// Check the conditions for an out-of-memory due to excessive GC time.
// Set _gc_overhead_limit_exceeded if all the conditions have been met.
void check_gc_overhead_limit(size_t young_live,
size_t eden_live,
size_t max_old_gen_size,
size_t max_eden_size,
bool is_full_gc,
GCCause::Cause gc_cause,
CollectorPolicy* collector_policy);
// Printing support // Printing support
virtual bool print_adaptive_size_policy_on(outputStream* st) const; virtual bool print_adaptive_size_policy_on(outputStream* st) const;
bool print_adaptive_size_policy_on(outputStream* st, int bool print_adaptive_size_policy_on(outputStream* st, int

View File

@ -115,11 +115,25 @@ bool VM_GC_HeapInspection::skip_operation() const {
void VM_GC_HeapInspection::doit() { void VM_GC_HeapInspection::doit() {
HandleMark hm; HandleMark hm;
CollectedHeap* ch = Universe::heap(); CollectedHeap* ch = Universe::heap();
ch->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GC_locker)
if (_full_gc) { if (_full_gc) {
ch->collect_as_vm_thread(GCCause::_heap_inspection); // The collection attempt below would be skipped anyway if
} else { // the gc locker is held. The following dump may then be a tad
// make the heap parsable (no need to retire TLABs) // misleading to someone expecting only live objects to show
ch->ensure_parsability(false); // up in the dump (see CR 6944195). Just issue a suitable warning
// in that case and do not attempt to do a collection.
// The latter is a subtle point, because even a failed attempt
// to GC will, in fact, induce one in the future, which we
// probably want to avoid in this case because the GC that we may
// be about to attempt holds value for us only
// if it happens now and not if it happens in the eventual
// future.
if (GC_locker::is_active()) {
warning("GC locker is held; pre-dump GC was skipped");
} else {
ch->collect_as_vm_thread(GCCause::_heap_inspection);
}
} }
HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */); HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
} }
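
The rewritten doit() encodes a small protocol: parsability must always be established, since the dump walks the heap, while the optional pre-dump GC is attempted only when the GC locker is free, because a locked attempt would both be skipped now and induce an unwanted GC later. A sketch with stand-ins for CollectedHeap and GC_locker:

#include <cstdio>

struct HeapStandIn {
  void ensure_parsability() { std::puts("retire TLABs; heap walkable"); }
  void collect()            { std::puts("full GC before the dump"); }
};

void heap_inspection(HeapStandIn& heap, bool full_gc, bool gc_locker_active) {
  heap.ensure_parsability();  // must happen even if no collection happens
  if (full_gc) {
    if (gc_locker_active) {
      std::puts("warning: GC locker is held; pre-dump GC was skipped");
    } else {
      heap.collect();
    }
  }
  // ... walk the heap and print the histogram ...
}

int main() {
  HeapStandIn h;
  heap_inspection(h, /*full_gc=*/true, /*gc_locker_active=*/true);
  return 0;
}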

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -89,8 +89,19 @@ class VM_GC_Operation: public VM_Operation {
if (full) { if (full) {
_full_gc_count_before = full_gc_count_before; _full_gc_count_before = full_gc_count_before;
} }
// In ParallelScavengeHeap::mem_allocate() collections can be
// executed within a loop and _all_soft_refs_clear can be set
// true after soft refs have been cleared by one collection
// and another collection has then been started, so that
// _all_soft_refs_clear can already be true when this collection
// is started. Don't assert that _all_soft_refs_clear has to be
// false here even though mutators have run. Soft refs will be
// cleared again in this collection.
}
~VM_GC_Operation() {
CollectedHeap* ch = Universe::heap();
ch->collector_policy()->set_all_soft_refs_clear(false);
} }
~VM_GC_Operation() {}
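
Moving the reset into the destructor is a deliberate RAII choice: _all_soft_refs_clear is meaningful only while a GC VM-operation is in flight, and the destructor runs on every exit path, so the flag can never leak into mutator time. A stand-in model of that lifetime:

#include <cassert>

struct CollectorPolicyStandIn { bool all_soft_refs_clear = false; };

// The flag is valid only for the duration of one GC VM-operation;
// the destructor resets it no matter how the operation ends.
class GcOperationScope {
  CollectorPolicyStandIn& _policy;
 public:
  explicit GcOperationScope(CollectorPolicyStandIn& p) : _policy(p) {}
  ~GcOperationScope() { _policy.all_soft_refs_clear = false; }
};

int main() {
  CollectorPolicyStandIn policy;
  {
    GcOperationScope op(policy);
    policy.all_soft_refs_clear = true;  // the GC cleared all SoftReferences
    assert(policy.all_soft_refs_clear);
  }                                     // mutators resume here: flag reset
  assert(!policy.all_soft_refs_clear);
  return 0;
}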
// Acquire the reference synchronization lock // Acquire the reference synchronization lock
virtual bool doit_prologue(); virtual bool doit_prologue();

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@ class BarrierSet;
class ThreadClosure; class ThreadClosure;
class AdaptiveSizePolicy; class AdaptiveSizePolicy;
class Thread; class Thread;
class CollectorPolicy;
// //
// CollectedHeap // CollectedHeap
@ -506,6 +507,9 @@ class CollectedHeap : public CHeapObj {
// Return the AdaptiveSizePolicy for the heap. // Return the AdaptiveSizePolicy for the heap.
virtual AdaptiveSizePolicy* size_policy() = 0; virtual AdaptiveSizePolicy* size_policy() = 0;
// Return the CollectorPolicy for the heap
virtual CollectorPolicy* collector_policy() const = 0;
// Iterate over all the ref-containing fields of all objects, calling // Iterate over all the ref-containing fields of all objects, calling
// "cl.do_oop" on each. This includes objects in permanent memory. // "cl.do_oop" on each. This includes objects in permanent memory.
virtual void oop_iterate(OopClosure* cl) = 0; virtual void oop_iterate(OopClosure* cl) = 0;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -112,6 +112,11 @@ void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
} }
} }
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
bool result = _should_clear_all_soft_refs;
set_should_clear_all_soft_refs(false);
return result;
}
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
int max_covered_regions) { int max_covered_regions) {
@ -126,6 +131,17 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
} }
} }
void CollectorPolicy::cleared_all_soft_refs() {
// If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
// have been cleared in the last collection but if the gc overhead
// limit continues to be near, SoftRefs should still be cleared.
if (size_policy() != NULL) {
_should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
}
_all_soft_refs_clear = true;
}
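
cleared_all_soft_refs() is deliberately sticky: satisfying one clear request immediately re-arms the next one while the overhead limit stays near. A compilable sketch against stand-in types (the NULL check mirrors collectors that have no adaptive size policy):

#include <cstdio>

struct SizePolicyStandIn {
  bool near;
  bool gc_overhead_limit_near() const { return near; }
};

struct CollectorPolicyStandIn {
  SizePolicyStandIn* size_policy;
  bool should_clear_all_soft_refs;
  bool all_soft_refs_clear;

  void cleared_all_soft_refs() {
    // Re-arm the request while overhead stays near the limit.
    if (size_policy != nullptr) {
      should_clear_all_soft_refs = size_policy->gc_overhead_limit_near();
    }
    all_soft_refs_clear = true;
  }
};

int main() {
  SizePolicyStandIn sp = { true };
  CollectorPolicyStandIn cp = { &sp, true, false };
  cp.cleared_all_soft_refs();
  std::printf("re-armed: %d, cleared: %d\n",
              cp.should_clear_all_soft_refs, cp.all_soft_refs_clear);  // 1, 1
  return 0;
}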
// GenCollectorPolicy methods. // GenCollectorPolicy methods.
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
@ -489,6 +505,12 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
debug_only(gch->check_for_valid_allocation_state()); debug_only(gch->check_for_valid_allocation_state());
assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
// In general gc_overhead_limit_was_exceeded should be false, so
// set it to false here and reset it to true only if the gc overhead
// limit is being exceeded, as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = NULL; HeapWord* result = NULL;
// Loop until the allocation is satisfied,
@ -524,12 +546,6 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
return result; return result;
} }
// There are NULL's returned for different circumstances below.
// In general gc_overhead_limit_was_exceeded should be false so
// set it so here and reset it to true only if the gc time
// limit is being exceeded as checked below.
*gc_overhead_limit_was_exceeded = false;
if (GC_locker::is_active_and_needs_gc()) { if (GC_locker::is_active_and_needs_gc()) {
if (is_tlab) { if (is_tlab) {
return NULL; // Caller will retry allocating individual object return NULL; // Caller will retry allocating individual object
@ -568,18 +584,6 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
gc_count_before = Universe::heap()->total_collections(); gc_count_before = Universe::heap()->total_collections();
} }
// Allocation has failed and a collection is about
// to be done. If the gc time limit was exceeded the
// last time a collection was done, return NULL so
// that an out-of-memory will be thrown. Clear
// gc_time_limit_exceeded so that subsequent attempts
// at a collection will be made.
if (size_policy()->gc_time_limit_exceeded()) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_time_limit_exceeded(false);
return NULL;
}
VM_GenCollectForAllocation op(size, VM_GenCollectForAllocation op(size,
is_tlab, is_tlab,
gc_count_before); gc_count_before);
@ -590,6 +594,24 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
assert(result == NULL, "must be NULL if gc_locked() is true"); assert(result == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary continue; // retry and/or stall as necessary
} }
// Allocation has failed and a collection
// has been done. If the gc overhead limit was exceeded
// this time, return NULL so that an out-of-memory
// will be thrown. Clear gc_overhead_limit_exceeded
// so that the overhead-exceeded state does not persist.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = all_soft_refs_clear();
assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
}
assert(result == NULL || gch->is_in_reserved(result), assert(result == NULL || gch->is_in_reserved(result),
"result not in heap"); "result not in heap");
return result; return result;
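
The fill_with_object() call above is about parsability rather than memory: op.result() may point at space the GC did hand back even though an OOM is about to be thrown, and dropping it raw would leave a hole that heap walkers cannot step over. A conceptual model, where a size-carrying header plays the role of HotSpot's filler objects (real fillers are int arrays or plain java.lang.Object instances):

#include <cstdio>
#include <cstddef>

// Every chunk between bottom and top must parse as an object with a
// readable size so a linear heap walk can skip it.
struct FillerHeader { size_t size_in_words; };

void fill_with_object(void* start, size_t size_in_words) {
  static_cast<FillerHeader*>(start)->size_in_words = size_in_words;
}

size_t object_size(const void* obj) {  // how a walker steps over the filler
  return static_cast<const FillerHeader*>(obj)->size_in_words;
}

int main() {
  size_t block[8];                 // stands in for the abandoned allocation
  fill_with_object(block, 8);
  std::printf("walker skips %zu words\n", object_size(block));
  return 0;
}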
@ -688,6 +710,9 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
return result; return result;
} }
assert(!should_clear_all_soft_refs(),
"Flag should have been handled and cleared prior to this point");
// What else? We might try synchronous finalization later. If the total // What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more // space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be // complete compaction phase than we've tried so far might be

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -69,12 +69,28 @@ class CollectorPolicy : public CHeapObj {
size_t _min_alignment; size_t _min_alignment;
size_t _max_alignment; size_t _max_alignment;
// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;
// Set to true when policy wants soft refs cleared.
// Reset to false by gc after it clears all soft refs.
bool _should_clear_all_soft_refs;
// Set to true whenever a gc clears all softrefs, and set to false
// each time gc returns to the mutator. For example, in the
// ParallelScavengeHeap case the latter would be done toward the end of
// mem_allocate() where it returns op.result().
bool _all_soft_refs_clear;
CollectorPolicy() : CollectorPolicy() :
_min_alignment(1), _min_alignment(1),
_max_alignment(1), _max_alignment(1),
_initial_heap_byte_size(0), _initial_heap_byte_size(0),
_max_heap_byte_size(0), _max_heap_byte_size(0),
_min_heap_byte_size(0) _min_heap_byte_size(0),
_size_policy(NULL),
_should_clear_all_soft_refs(false),
_all_soft_refs_clear(false)
{} {}
public: public:
@ -98,6 +114,19 @@ class CollectorPolicy : public CHeapObj {
G1CollectorPolicyKind G1CollectorPolicyKind
}; };
AdaptiveSizePolicy* size_policy() { return _size_policy; }
bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
// Returns the current value of _should_clear_all_soft_refs.
// _should_clear_all_soft_refs is set to false as a side effect.
bool use_should_clear_all_soft_refs(bool v);
bool all_soft_refs_clear() { return _all_soft_refs_clear; }
void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
// Called by the GC after Soft Refs have been cleared to indicate
// that the request in _should_clear_all_soft_refs has been fulfilled.
void cleared_all_soft_refs();
// Identification methods. // Identification methods.
virtual GenCollectorPolicy* as_generation_policy() { return NULL; } virtual GenCollectorPolicy* as_generation_policy() { return NULL; }
virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; } virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; }
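The two new fields form a request/acknowledge handshake: the policy raises `_should_clear_all_soft_refs`, the GC consumes it through `use_should_clear_all_soft_refs()`, and `cleared_all_soft_refs()` records the acknowledgement in `_all_soft_refs_clear`. A toy model of that handshake follows; the method bodies are assumed (the real ones live in collectorPolicy.cpp, which this hunk does not show) and the signature of `use_should_clear_all_soft_refs` is simplified.

    #include <cstdio>

    class Policy {
      bool _should_clear_all_soft_refs = false;  // request: policy -> GC
      bool _all_soft_refs_clear        = false;  // acknowledge: GC -> mutator
     public:
      void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
      bool all_soft_refs_clear() const            { return _all_soft_refs_clear; }

      // Consume the request: read it and reset it as a side effect, so one
      // request drives exactly one soft-ref-clearing collection.
      bool use_should_clear_all_soft_refs() {
        bool r = _should_clear_all_soft_refs;
        _should_clear_all_soft_refs = false;
        return r;
      }

      // GC acknowledges that the just-finished collection cleared soft refs.
      void cleared_all_soft_refs() { _all_soft_refs_clear = true; }

      // Each return to the mutator invalidates the acknowledgement.
      void returned_to_mutator()   { _all_soft_refs_clear = false; }
    };

    int main() {
      Policy p;
      p.set_should_clear_all_soft_refs(true);    // e.g. overhead limit is near
      if (p.use_should_clear_all_soft_refs()) {  // GC consumes the request...
        p.cleared_all_soft_refs();               // ...and acknowledges the clear
      }
      std::printf("cleared=%d pending=%d\n",
                  p.all_soft_refs_clear(),
                  p.use_should_clear_all_soft_refs());  // cleared=1 pending=0
    }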
@@ -165,6 +194,22 @@ class CollectorPolicy : public CHeapObj {
 };
 
+class ClearedAllSoftRefs : public StackObj {
+  bool _clear_all_soft_refs;
+  CollectorPolicy* _collector_policy;
+ public:
+  ClearedAllSoftRefs(bool clear_all_soft_refs,
+                     CollectorPolicy* collector_policy) :
+    _clear_all_soft_refs(clear_all_soft_refs),
+    _collector_policy(collector_policy) {}
+
+  ~ClearedAllSoftRefs() {
+    if (_clear_all_soft_refs) {
+      _collector_policy->cleared_all_soft_refs();
+    }
+  }
+};
+
 class GenCollectorPolicy : public CollectorPolicy {
  protected:
   size_t _min_gen0_size;
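`ClearedAllSoftRefs` is a stack-allocated RAII guard: it is armed in the constructor and fires in the destructor, so the policy gets its acknowledgement on every exit path from a collection, including early returns. A self-contained sketch of the same pattern, with a toy `CollectorPolicy` standing in for the real class:

    #include <cstdio>

    // Toy CollectorPolicy exposing just the acknowledge hook.
    struct CollectorPolicy {
      bool all_soft_refs_clear = false;
      void cleared_all_soft_refs() { all_soft_refs_clear = true; }
    };

    // Same shape as the guard in the hunk above.
    class ClearedAllSoftRefs {
      bool _clear_all_soft_refs;
      CollectorPolicy* _collector_policy;
     public:
      ClearedAllSoftRefs(bool clear, CollectorPolicy* cp)
          : _clear_all_soft_refs(clear), _collector_policy(cp) {}
      ~ClearedAllSoftRefs() {
        if (_clear_all_soft_refs) _collector_policy->cleared_all_soft_refs();
      }
    };

    static void do_collection(CollectorPolicy* cp, bool clear_all_soft_refs) {
      ClearedAllSoftRefs casr(clear_all_soft_refs, cp);
      // ... collection work; may return early on any path ...
    }  // destructor runs here, acknowledging the clear

    int main() {
      CollectorPolicy cp;
      do_collection(&cp, /*clear_all_soft_refs=*/true);
      std::printf("acknowledged=%d\n", cp.all_soft_refs_clear);  // 1
    }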
@@ -173,10 +218,6 @@ class GenCollectorPolicy : public CollectorPolicy {
 
   GenerationSpec **_generations;
 
-  // The sizing of the different generations in the heap are controlled
-  // by a sizing policy.
-  AdaptiveSizePolicy* _size_policy;
-
   // Return true if an allocation should be attempted in the older
   // generation if it fails in the younger generation.  Return
   // false, otherwise.
@@ -236,14 +277,11 @@ class GenCollectorPolicy : public CollectorPolicy {
   virtual size_t large_typearray_limit();
 
   // Adaptive size policy
-  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
 };
 
 // All of hotspot's current collectors are subtypes of this
 // class. Currently, these collectors all use the same gen[0],
 // but have different gen[1] types. If we add another subtype

--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -594,6 +594,10 @@ void DefNewGeneration::collect(bool full,
     _tenuring_threshold =
       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 
+    // A successful scavenge should restart the GC overhead limit count,
+    // which is tracked for full GCs.
+    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+    size_policy->reset_gc_overhead_limit_count();
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
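The overhead limit trips only after several consecutive full collections that recover too little space; a successful young scavenge is evidence the heap is still making progress, so the consecutive count restarts. The sketch below models that counter with assumed mechanics; the trip threshold is illustrative, not HotSpot's default.

    #include <cstdio>

    class OverheadLimit {
      int  _count = 0;
      bool _exceeded = false;
      static const int kTripAfter = 4;  // consecutive bad full GCs (illustrative)
     public:
      // Called after a full GC that spent too much time for too little space.
      void full_gc_over_limit() {
        if (++_count >= kTripAfter) _exceeded = true;
      }
      // Called after a successful scavenge: progress resumed, start over.
      void reset_gc_overhead_limit_count() { _count = 0; }
      bool gc_overhead_limit_exceeded() const { return _exceeded; }
    };

    int main() {
      OverheadLimit lim;
      for (int i = 0; i < 3; i++) lim.full_gc_over_limit();
      lim.reset_gc_overhead_limit_count();  // a young GC freed real space
      lim.full_gc_over_limit();
      std::printf("exceeded=%d\n", lim.gc_overhead_limit_exceeded());  // 0
    }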

--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,8 @@ void GenCollectedHeap::do_collection(bool full,
   assert(my_thread->is_VM_thread() ||
          my_thread->is_ConcurrentGC_thread(),
          "incorrect thread type capability");
-  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
+  assert(Heap_lock->is_locked(),
+         "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
 
   assert(max_level < n_gens(), "sanity check");
@@ -436,6 +437,11 @@ void GenCollectedHeap::do_collection(bool full,
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
   const size_t perm_prev_used = perm_gen()->used();
 
   if (PrintHeapAtGC) {
@@ -560,11 +566,11 @@ void GenCollectedHeap::do_collection(bool full,
         if (rp->discovery_is_atomic()) {
           rp->verify_no_references_recorded();
           rp->enable_discovery();
-          rp->setup_policy(clear_all_soft_refs);
+          rp->setup_policy(do_clear_all_soft_refs);
         } else {
           // collect() below will enable discovery as appropriate
         }
-        _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
+        _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
         if (!rp->enqueuing_is_done()) {
           rp->enqueue_discovered_references();
         } else {
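The key move in this file is computing the effective flag once, at the top of the collection, and then threading only that value through the reference processor and every generation's collect call; a policy-requested clear thereby behaves exactly like an explicitly requested one. A compact sketch of the pattern, with simplified stand-in types rather than the real GenCollectedHeap interface:

    #include <cstdio>

    struct Policy       { bool pending_clear = false; };
    struct RefProcessor { void setup_policy(bool clear) { std::printf("refs: clear=%d\n", clear); } };
    struct Generation   { void collect(bool clear)      { std::printf("gen:  clear=%d\n", clear); } };

    void do_collection(bool clear_all_soft_refs, Policy& pol,
                       RefProcessor& rp, Generation& gen) {
      // Fold the caller's request and the policy's pending request into one
      // value up front, then use only that value from here on; mixing the
      // raw parameter back in is exactly the inconsistency this hunk removes.
      const bool do_clear = clear_all_soft_refs || pol.pending_clear;
      rp.setup_policy(do_clear);
      gen.collect(do_clear);
    }

    int main() {
      Policy pol; RefProcessor rp; Generation gen;
      pol.pending_clear = true;  // the policy wants soft refs cleared
      do_collection(/*clear_all_soft_refs=*/false, pol, rp, gen);  // both print clear=1
    }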

--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,13 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                        bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+#ifdef ASSERT
+  if (gch->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
+
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
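The new block is a debug-only consistency check: if the policy still wants soft refs cleared at this point, the caller must already have folded that request into the `clear_all_softrefs` parameter (see the `do_collection` change above). A standalone sketch of the same idea, using standard `<cassert>` in place of HotSpot's `ASSERT` machinery:

    #include <cassert>
    #include <cstdio>

    struct Policy {
      bool pending = false;
      bool should_clear_all_soft_refs() const { return pending; }
    };

    void invoke_at_safepoint(const Policy& policy, bool clear_all_softrefs) {
    #ifndef NDEBUG
      // If the policy still wants soft refs cleared, the caller must have
      // folded that request into the flag it passed us.
      if (policy.should_clear_all_soft_refs()) {
        assert(clear_all_softrefs && "Policy should have been checked earlier");
      }
    #endif
      std::printf("mark-sweep, clear_all_softrefs=%d\n", clear_all_softrefs);
    }

    int main() {
      Policy p; p.pending = true;
      invoke_at_safepoint(p, /*clear_all_softrefs=*/true);  // invariant holds
    }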
@@ -44,7 +51,6 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
 
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->perm_gen()->stat_record()->invocations++;
 
   // Capture heap size before collection for printing.

--- a/src/share/vm/services/g1MemoryPool.cpp
+++ b/src/share/vm/services/g1MemoryPool.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@ size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  size_t young_list_length = g1h->young_list_length();
+  size_t young_list_length = g1h->young_list()->length();
   size_t eden_used = young_list_length * HeapRegion::GrainBytes;
   size_t survivor_used = survivor_space_used(g1h);
   eden_used = subtract_up_to_zero(eden_used, survivor_used);
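Eden use is estimated here as the young-list region count times the region size, minus the survivor share. Since all of these are unsigned `size_t` values, a plain subtraction could wrap around to a huge number, which is why the clamping helper is used. A plausible shape for such a helper (the real definition lives in g1MemoryPool.hpp, which this hunk does not show):

    #include <cstddef>
    #include <cstdio>

    // Clamp x - y at zero to avoid unsigned wrap-around.
    template <typename T>
    static T subtract_up_to_zero(T x, T y) {
      return (x > y) ? (x - y) : T(0);
    }

    int main() {
      const std::size_t grain = 1024 * 1024;      // illustrative region size
      std::size_t eden_used     = 3 * grain;      // 3 young regions
      std::size_t survivor_used = 4 * grain;      // more survivors than eden
      eden_used = subtract_up_to_zero(eden_used, survivor_used);
      std::printf("eden_used=%zu\n", eden_used);  // 0, not a wrapped huge value
    }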