commit a8f9284064
@@ -4234,7 +4234,6 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
static jint num_ct_writes = 0;
static jint num_ct_writes_filtered_in_hr = 0;
static jint num_ct_writes_filtered_null = 0;
static jint num_ct_writes_filtered_pop = 0;
static G1CollectedHeap* g1 = NULL;

static Thread* count_ct_writes(void* filter_val, void* new_val) {
@@ -4247,25 +4246,19 @@ static Thread* count_ct_writes(void* filter_val, void* new_val) {
if (g1 == NULL) {
g1 = G1CollectedHeap::heap();
}
if ((HeapWord*)new_val < g1->popular_object_boundary()) {
Atomic::inc(&num_ct_writes_filtered_pop);
}
}
if ((num_ct_writes % 1000000) == 0) {
jint num_ct_writes_filtered =
num_ct_writes_filtered_in_hr +
num_ct_writes_filtered_null +
num_ct_writes_filtered_pop;
num_ct_writes_filtered_null;

tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
" (%5.2f%% intra-HR, %5.2f%% null, %5.2f%% popular).",
" (%5.2f%% intra-HR, %5.2f%% null).",
num_ct_writes,
100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_in_hr/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_null/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_pop/
(float)num_ct_writes);
}
return Thread::current();

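Note: the "popular" filter being deleted in the hunk above relied on one address comparison: every popular object had been copied into the first few regions of the reserved heap, so anything below popular_object_boundary() was popular by construction. A minimal illustrative sketch of that test (stand-in types, not the HotSpot source):

    // Classify a card-table write the way count_ct_writes() did.
    // "boundary" plays the role of g1->popular_object_boundary().
    static bool is_popular_write(const void* new_val, const void* boundary) {
      // Popular objects live below the boundary, so a single pointer
      // compare filters them out of the card-marking statistics.
      return (const char*)new_val < (const char*)boundary;
    }
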
@@ -3847,7 +3847,7 @@ bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
MutexLockerEx ml(ovflw_stk->par_lock(),
Mutex::_no_safepoint_check_flag);
// Grab up to 1/4 the size of the work queue
size_t num = MIN2((size_t)work_q->max_elems()/4,
size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
num = MIN2(num, ovflw_stk->length());
for (int i = (int) num; i > 0; i--) {
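Note: this one-line change (and the matching ones in the next three hunks) fixes the bound on how many objects may be pulled off the overflow stack or list at once. The old bound, a quarter of max_elems(), ignored how full the work queue already was, so a transfer could exceed the queue's free space; the new bound is a quarter of the remaining headroom. In the do_work_steal() variants the bound is also recomputed inside the while loop, so it tracks the queue as it drains and refills. A self-contained sketch with made-up numbers:

    // Sketch only; "max_elems"/"cur_size" mirror work_q->max_elems()/size().
    size_t transfer_bound(size_t max_elems, size_t cur_size, size_t desired) {
      size_t headroom = max_elems - cur_size;       // free slots right now
      size_t quarter  = headroom / 4;               // old code: max_elems / 4
      return quarter < desired ? quarter : desired; // MIN2
    }
    // e.g. max_elems = 16384, cur_size = 16284, desired = 200 (hypothetical):
    // the old bound admits 200 objects into 100 free slots; this admits 25.
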
@@ -5204,13 +5204,12 @@ CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
CMSBitMap* bm = &(_collector->_markBitMap);
size_t num_from_overflow_list =
MIN2((size_t)work_q->max_elems()/4,
(size_t)ParGCDesiredObjsFromOverflowList);

while (true) {
// Completely finish any left over work from (an) earlier round(s)
cl->trim_queue(0);
size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
@@ -5622,13 +5621,12 @@ void CMSRefProcTaskProxy::do_work_steal(int i,
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
size_t num_from_overflow_list =
MIN2((size_t)work_q->max_elems()/4,
(size_t)ParGCDesiredObjsFromOverflowList);

while (true) {
// Completely finish any left over work from (an) earlier round(s)
drain->trim_queue(0);
size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
@@ -9021,7 +9019,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
size_t num = MIN2((size_t)_mark_stack->capacity()/4,
size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
(size_t)ParGCDesiredObjsFromOverflowList);

bool res = _collector->take_from_overflow_list(num, _mark_stack);

@@ -277,8 +277,6 @@ printHeapRegion(HeapRegion *hr) {
gclog_or_tty->print("H: ");
if (hr->in_collection_set())
gclog_or_tty->print("CS: ");
if (hr->popular())
gclog_or_tty->print("pop: ");
gclog_or_tty->print_cr("Region " PTR_FORMAT " (%s%s) "
"[" PTR_FORMAT ", " PTR_FORMAT"] "
"Used: " SIZE_FORMAT "K, garbage: " SIZE_FORMAT "K.",

@@ -42,21 +42,6 @@

// Local to this file.

// Finds the first HeapRegion.
// No longer used, but might be handy someday.

class FindFirstRegionClosure: public HeapRegionClosure {
HeapRegion* _a_region;
public:
FindFirstRegionClosure() : _a_region(NULL) {}
bool doHeapRegion(HeapRegion* r) {
_a_region = r;
return true;
}
HeapRegion* result() { return _a_region; }
};


class RefineCardTableEntryClosure: public CardTableEntryClosure {
SuspendibleThreadSet* _sts;
G1RemSet* _g1rs;
@@ -1207,13 +1192,12 @@ G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
bool par) {
assert(!hr->continuesHumongous(), "should have filtered these out");
size_t res = 0;
if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) {
if (!hr->is_young()) {
if (G1PolicyVerbose > 0)
gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
" during cleanup", hr, hr->used());
free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
}
if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
!hr->is_young()) {
if (G1PolicyVerbose > 0)
gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
" during cleanup", hr, hr->used());
free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
}
}

@@ -1342,10 +1326,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_refine_cte_cl(NULL),
_free_region_list(NULL), _free_region_list_size(0),
_free_regions(0),
_popular_object_boundary(NULL),
_cur_pop_hr_index(0),
_popular_regions_to_be_evacuated(NULL),
_pop_obj_rc_at_copy(),
_full_collection(false),
_unclean_region_list(),
_unclean_regions_coming(false),
@@ -1520,26 +1500,11 @@ jint G1CollectedHeap::initialize() {
_czft = new ConcurrentZFThread();
}



// Allocate the popular regions; take them off free lists.
size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes;
expand(pop_byte_size);
_popular_object_boundary =
_g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords);
for (int i = 0; i < G1NumPopularRegions; i++) {
HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords);
// assert(hr != NULL && hr->bottom() < _popular_object_boundary,
// "Should be enough, and all should be below boundary.");
hr->set_popular(true);
}
assert(_cur_pop_hr_index == 0, "Start allocating at the first region.");

// Initialize the from_card cache structure of HeapRegionRemSet.
HeapRegionRemSet::init_heap(max_regions());

// Now expand into the rest of the initial heap size.
expand(init_byte_size - pop_byte_size);
// Now expand into the initial heap size.
expand(init_byte_size);

// Perform any initialization actions delegated to the policy.
g1_policy()->init();
@@ -1654,8 +1619,7 @@ size_t G1CollectedHeap::recalculate_used() const {
class SumUsedRegionsClosure: public HeapRegionClosure {
size_t _num;
public:
// _num is set to 1 to account for the popular region
SumUsedRegionsClosure() : _num(G1NumPopularRegions) {}
SumUsedRegionsClosure() : _num(0) {}
bool doHeapRegion(HeapRegion* r) {
if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
_num += 1;
@@ -1758,14 +1722,20 @@ public:
}
};

void G1CollectedHeap::oop_iterate(OopClosure* cl) {
void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
IterateOopClosureRegionClosure blk(_g1_committed, cl);
_hrs->iterate(&blk);
if (do_perm) {
perm_gen()->oop_iterate(cl);
}
}

void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
IterateOopClosureRegionClosure blk(mr, cl);
_hrs->iterate(&blk);
if (do_perm) {
perm_gen()->oop_iterate(cl);
}
}

// Iterates an ObjectClosure over all objects within a HeapRegion.
@@ -1782,9 +1752,12 @@ public:
}
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
IterateObjectClosureRegionClosure blk(cl);
_hrs->iterate(&blk);
if (do_perm) {
perm_gen()->object_iterate(cl);
}
}

void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
@@ -2318,9 +2291,6 @@ void G1CollectedHeap::print_tracing_info() const {
if (SummarizeG1ZFStats) {
ConcurrentZFThread::print_summary_info();
}
if (G1SummarizePopularity) {
print_popularity_summary_info();
}
g1_policy()->print_yg_surv_rate_info();

GCOverheadReporter::printGCOverhead();
@@ -2414,7 +2384,7 @@ G1CollectedHeap::checkConcurrentMark() {
VerifyMarkedObjsClosure verifycl(this);
// MutexLockerEx x(getMarkBitMapLock(),
// Mutex::_no_safepoint_check_flag);
object_iterate(&verifycl);
object_iterate(&verifycl, false);
}

void G1CollectedHeap::do_sync_mark() {
@@ -2495,30 +2465,19 @@ G1CollectedHeap::cleanup_surviving_young_words() {
// </NEW PREDICTION>

void
G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
G1CollectedHeap::do_collection_pause_at_safepoint() {
char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (popular_region != NULL)
strcat(verbose_str, "(popular)");
else if (g1_policy()->in_young_gc_mode()) {
if (g1_policy()->in_young_gc_mode()) {
if (g1_policy()->full_young_gcs())
strcat(verbose_str, "(young)");
else
strcat(verbose_str, "(partial)");
}
bool reset_should_initiate_conc_mark = false;
if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) {
// we currently do not allow an initial mark phase to be piggy-backed
// on a popular pause
reset_should_initiate_conc_mark = true;
g1_policy()->unset_should_initiate_conc_mark();
}
if (g1_policy()->should_initiate_conc_mark())
strcat(verbose_str, " (initial-mark)");

GCCauseSetter x(this, (popular_region == NULL ?
GCCause::_g1_inc_collection_pause :
GCCause::_g1_pop_region_collection_pause));
GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);

// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
@@ -2609,7 +2568,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
save_marks();

// We must do this before any possible evacuation that should propagate
// marks, including evacuation of popular objects in a popular pause.
// marks.
if (mark_in_progress()) {
double start_time_sec = os::elapsedTime();

@@ -2626,29 +2585,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {

assert(regions_accounted_for(), "Region leakage.");

bool abandoned = false;

if (mark_in_progress())
concurrent_mark()->newCSet();

// Now choose the CS.
if (popular_region == NULL) {
g1_policy()->choose_collection_set();
} else {
// We may be evacuating a single region (for popularity).
g1_policy()->record_popular_pause_preamble_start();
popularity_pause_preamble(popular_region);
g1_policy()->record_popular_pause_preamble_end();
abandoned = (g1_policy()->collection_set() == NULL);
// Now we allow more regions to be added (we have to collect
// all popular regions).
if (!abandoned) {
g1_policy()->choose_collection_set(popular_region);
}
}
g1_policy()->choose_collection_set();

// We may abandon a pause if we find no region that will fit in the MMU
// pause.
abandoned = (g1_policy()->collection_set() == NULL);
bool abandoned = (g1_policy()->collection_set() == NULL);

// Nothing to do if we were unable to choose a collection set.
if (!abandoned) {
@@ -2673,12 +2618,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;

if (popular_region != NULL) {
// We have to wait until now, because we don't want the region to
// be rescheduled for pop-evac during RS update.
popular_region->set_popular_pending(false);
}

release_gc_alloc_regions(false /* totally */);

cleanup_surviving_young_words();
@@ -2724,8 +2663,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(popular_region != NULL,
abandoned);
g1_policy()->record_collection_pause_end(abandoned);

assert(regions_accounted_for(), "Region leakage.");

@@ -2759,9 +2697,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {

assert(verify_region_lists(), "Bad region lists.");

if (reset_should_initiate_conc_mark)
g1_policy()->set_should_initiate_conc_mark();

if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
print_tracing_info();
@@ -4707,7 +4642,6 @@ G1CollectedHeap::free_region_work(HeapRegion* hr,
size_t& freed_regions,
UncleanRegionList* list,
bool par) {
assert(!hr->popular(), "should not free popular regions");
pre_used += hr->used();
if (hr->isHumongous()) {
assert(hr->startsHumongous(),
@@ -4791,12 +4725,6 @@ void G1CollectedHeap::cleanUpCardTable() {


void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
// First do any popular regions.
HeapRegion* hr;
while ((hr = popular_region_to_evac()) != NULL) {
evac_popular_region(hr);
}
// Now do heuristic pauses.
if (g1_policy()->should_do_collection_pause(word_size)) {
do_collection_pause();
}
@@ -5192,7 +5120,7 @@ class RegionCounter: public HeapRegionClosure {
public:
RegionCounter() : _n(0) {}
bool doHeapRegion(HeapRegion* r) {
if (r->is_empty() && !r->popular()) {
if (r->is_empty()) {
assert(!r->isHumongous(), "H regions should not be empty.");
_n++;
}
@@ -5336,14 +5264,8 @@ public:
r->set_zero_fill_allocated();
} else {
assert(r->is_empty(), "tautology");
if (r->popular()) {
if (r->zero_fill_state() != HeapRegion::Allocated) {
r->ensure_zero_filled_locked();
r->set_zero_fill_allocated();
}
} else {
_n++;
switch (r->zero_fill_state()) {
_n++;
switch (r->zero_fill_state()) {
case HeapRegion::NotZeroFilled:
case HeapRegion::ZeroFilling:
_g1->put_region_on_unclean_list_locked(r);
@@ -5354,7 +5276,6 @@ public:
case HeapRegion::ZeroFilled:
_g1->put_free_region_on_list_locked(r);
break;
}
}
}
return false;
@@ -5402,376 +5323,6 @@ void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
heap_region_iterate(&rs);
}

class CountObjClosure: public ObjectClosure {
size_t _n;
public:
CountObjClosure() : _n(0) {}
void do_object(oop obj) { _n++; }
size_t n() { return _n; }
};

size_t G1CollectedHeap::pop_object_used_objs() {
size_t sum_objs = 0;
for (int i = 0; i < G1NumPopularRegions; i++) {
CountObjClosure cl;
_hrs->at(i)->object_iterate(&cl);
sum_objs += cl.n();
}
return sum_objs;
}

size_t G1CollectedHeap::pop_object_used_bytes() {
size_t sum_bytes = 0;
for (int i = 0; i < G1NumPopularRegions; i++) {
sum_bytes += _hrs->at(i)->used();
}
return sum_bytes;
}


static int nq = 0;

HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) {
while (_cur_pop_hr_index < G1NumPopularRegions) {
HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
HeapWord* res = cur_pop_region->allocate(word_size);
if (res != NULL) {
// We account for popular objs directly in the used summary:
_summary_bytes_used += (word_size * HeapWordSize);
return res;
}
// Otherwise, try the next region (first making sure that we remember
// the last "top" value as the "next_top_at_mark_start", so that
// objects made popular during markings aren't automatically considered
// live).
cur_pop_region->note_end_of_copying();
// Otherwise, try the next region.
_cur_pop_hr_index++;
}
// XXX: For now !!!
vm_exit_out_of_memory(word_size,
"Not enough pop obj space (To Be Fixed)");
return NULL;
}

class HeapRegionList: public CHeapObj {
public:
HeapRegion* hr;
HeapRegionList* next;
};

void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) {
// This might happen during parallel GC, so protect by this lock.
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
// We don't schedule regions whose evacuations are already pending, or
// are already being evacuated.
if (!r->popular_pending() && !r->in_collection_set()) {
r->set_popular_pending(true);
if (G1TracePopularity) {
gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" "
"["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.",
r, r->bottom(), r->end());
}
HeapRegionList* hrl = new HeapRegionList;
hrl->hr = r;
hrl->next = _popular_regions_to_be_evacuated;
_popular_regions_to_be_evacuated = hrl;
}
}

HeapRegion* G1CollectedHeap::popular_region_to_evac() {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
HeapRegion* res = NULL;
while (_popular_regions_to_be_evacuated != NULL && res == NULL) {
HeapRegionList* hrl = _popular_regions_to_be_evacuated;
_popular_regions_to_be_evacuated = hrl->next;
res = hrl->hr;
// The G1RSPopLimit may have increased, so recheck here...
if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) {
// Hah: don't need to schedule.
if (G1TracePopularity) {
gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" "
"["PTR_FORMAT", "PTR_FORMAT") "
"for pop-object evacuation (size %d < limit %d)",
res, res->bottom(), res->end(),
res->rem_set()->occupied(), G1RSPopLimit);
}
res->set_popular_pending(false);
res = NULL;
}
// We do not reset res->popular() here; if we did so, it would allow
// the region to be "rescheduled" for popularity evacuation. Instead,
// this is done in the collection pause, with the world stopped.
// So the invariant is that the regions in the list have the popularity
// boolean set, but having the boolean set does not imply membership
// on the list (though there can at most one such pop-pending region
// not on the list at any time).
delete hrl;
}
return res;
}

void G1CollectedHeap::evac_popular_region(HeapRegion* hr) {
while (true) {
// Don't want to do a GC pause while cleanup is being completed!
wait_for_cleanup_complete();

// Read the GC count while holding the Heap_lock
int gc_count_before = SharedHeap::heap()->total_collections();
g1_policy()->record_stop_world_start();

{
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_G1PopRegionCollectionPause op(gc_count_before, hr);
VMThread::execute(&op);

// If the prolog succeeded, we didn't do a GC for this.
if (op.prologue_succeeded()) break;
}
// Otherwise we didn't. We should recheck the size, though, since
// the limit may have increased...
if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) {
hr->set_popular_pending(false);
break;
}
}
}

void G1CollectedHeap::atomic_inc_obj_rc(oop obj) {
Atomic::inc(obj_rc_addr(obj));
}

class CountRCClosure: public OopsInHeapRegionClosure {
G1CollectedHeap* _g1h;
bool _parallel;
public:
CountRCClosure(G1CollectedHeap* g1h) :
_g1h(g1h), _parallel(ParallelGCThreads > 0)
{}
void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
void do_oop(oop* p) {
oop obj = *p;
assert(obj != NULL, "Precondition.");
if (_parallel) {
// We go sticky at the limit to avoid excess contention.
// If we want to track the actual RC's further, we'll need to keep a
// per-thread hash table or something for the popular objects.
if (_g1h->obj_rc(obj) < G1ObjPopLimit) {
_g1h->atomic_inc_obj_rc(obj);
}
} else {
_g1h->inc_obj_rc(obj);
}
}
};

class EvacPopObjClosure: public ObjectClosure {
G1CollectedHeap* _g1h;
size_t _pop_objs;
size_t _max_rc;
public:
EvacPopObjClosure(G1CollectedHeap* g1h) :
_g1h(g1h), _pop_objs(0), _max_rc(0) {}

void do_object(oop obj) {
size_t rc = _g1h->obj_rc(obj);
_max_rc = MAX2(rc, _max_rc);
if (rc >= (size_t) G1ObjPopLimit) {
_g1h->_pop_obj_rc_at_copy.add((double)rc);
size_t word_sz = obj->size();
HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz);
oop new_pop_obj = (oop)new_pop_loc;
Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz);
obj->forward_to(new_pop_obj);
G1ScanAndBalanceClosure scan_and_balance(_g1h);
new_pop_obj->oop_iterate_backwards(&scan_and_balance);
// preserve "next" mark bit if marking is in progress.
if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) {
_g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj);
}

if (G1TracePopularity) {
gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT
" pop (%d), move to " PTR_FORMAT,
(void*) obj, word_sz,
_g1h->obj_rc(obj), (void*) new_pop_obj);
}
_pop_objs++;
}
}
size_t pop_objs() { return _pop_objs; }
size_t max_rc() { return _max_rc; }
};

class G1ParCountRCTask : public AbstractGangTask {
G1CollectedHeap* _g1h;
BitMap _bm;

size_t getNCards() {
return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
/ G1BlockOffsetSharedArray::N_bytes;
}
CountRCClosure _count_rc_closure;
public:
G1ParCountRCTask(G1CollectedHeap* g1h) :
AbstractGangTask("G1 Par RC Count task"),
_g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h)
{}

void work(int i) {
ResourceMark rm;
HandleMark hm;
_g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i);
}
};

void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) {
// We're evacuating a single region (for popularity).
if (G1TracePopularity) {
gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")",
popular_region->bottom(), popular_region->end());
}
g1_policy()->set_single_region_collection_set(popular_region);
size_t max_rc;
if (!compute_reference_counts_and_evac_popular(popular_region,
&max_rc)) {
// We didn't evacuate any popular objects.
// We increase the RS popularity limit, to prevent this from
// happening in the future.
if (G1RSPopLimit < (1 << 30)) {
G1RSPopLimit *= 2;
}
// For now, interesting enough for a message:
#if 1
gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), "
"failed to find a pop object (max = %d).",
popular_region->bottom(), popular_region->end(),
max_rc);
gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit);
#endif // 0
// Also, we reset the collection set to NULL, to make the rest of
// the collection do nothing.
assert(popular_region->next_in_collection_set() == NULL,
"should be single-region.");
popular_region->set_in_collection_set(false);
popular_region->set_popular_pending(false);
g1_policy()->clear_collection_set();
}
}

bool G1CollectedHeap::
compute_reference_counts_and_evac_popular(HeapRegion* popular_region,
size_t* max_rc) {
HeapWord* rc_region_bot;
HeapWord* rc_region_end;

// Set up the reference count region.
HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords);
if (rc_region != NULL) {
rc_region_bot = rc_region->bottom();
rc_region_end = rc_region->end();
} else {
rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords);
if (rc_region_bot == NULL) {
vm_exit_out_of_memory(HeapRegion::GrainWords,
"No space for RC region.");
}
rc_region_end = rc_region_bot + HeapRegion::GrainWords;
}

if (G1TracePopularity)
gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")",
rc_region_bot, rc_region_end);
if (rc_region_bot > popular_region->bottom()) {
_rc_region_above = true;
_rc_region_diff =
pointer_delta(rc_region_bot, popular_region->bottom(), 1);
} else {
assert(rc_region_bot < popular_region->bottom(), "Can't be equal.");
_rc_region_above = false;
_rc_region_diff =
pointer_delta(popular_region->bottom(), rc_region_bot, 1);
}
g1_policy()->record_pop_compute_rc_start();
// Count external references.
g1_rem_set()->prepare_for_oops_into_collection_set_do();
if (ParallelGCThreads > 0) {

set_par_threads(workers()->total_workers());
G1ParCountRCTask par_count_rc_task(this);
workers()->run_task(&par_count_rc_task);
set_par_threads(0);

} else {
CountRCClosure count_rc_closure(this);
g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0);
}
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
g1_policy()->record_pop_compute_rc_end();

// Now evacuate popular objects.
g1_policy()->record_pop_evac_start();
EvacPopObjClosure evac_pop_obj_cl(this);
popular_region->object_iterate(&evac_pop_obj_cl);
*max_rc = evac_pop_obj_cl.max_rc();

// Make sure the last "top" value of the current popular region is copied
// as the "next_top_at_mark_start", so that objects made popular during
// markings aren't automatically considered live.
HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
cur_pop_region->note_end_of_copying();

if (rc_region != NULL) {
free_region(rc_region);
} else {
FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot);
}
g1_policy()->record_pop_evac_end();

return evac_pop_obj_cl.pop_objs() > 0;
}

class CountPopObjInfoClosure: public HeapRegionClosure {
size_t _objs;
size_t _bytes;

class CountObjClosure: public ObjectClosure {
int _n;
public:
CountObjClosure() : _n(0) {}
void do_object(oop obj) { _n++; }
size_t n() { return _n; }
};

public:
CountPopObjInfoClosure() : _objs(0), _bytes(0) {}
bool doHeapRegion(HeapRegion* r) {
_bytes += r->used();
CountObjClosure blk;
r->object_iterate(&blk);
_objs += blk.n();
return false;
}
size_t objs() { return _objs; }
size_t bytes() { return _bytes; }
};


void G1CollectedHeap::print_popularity_summary_info() const {
CountPopObjInfoClosure blk;
for (int i = 0; i <= _cur_pop_hr_index; i++) {
blk.doHeapRegion(_hrs->at(i));
}
gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.",
blk.objs(), blk.bytes());
gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].",
_pop_obj_rc_at_copy.avg(),
_pop_obj_rc_at_copy.maximum(),
_pop_obj_rc_at_copy.sd());
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
_refine_cte_cl->set_concurrent(concurrent);
}
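Note: the block deleted above is the entire popularity pipeline, and it reads best as one mechanism: a region whose remembered set grew past G1RSPopLimit was put on a pending list (schedule_popular_region_evac), a dedicated single-region pause then counted per-object reference counts during the remembered-set walk, and any object whose count reached G1ObjPopLimit was copied below the popular boundary; if no object qualified, the limit was doubled and the pause abandoned. A standalone toy of the two policy decisions visible in the removed code (not HotSpot code):

    #include <cstddef>

    struct ToyRegion { size_t rs_occupied; bool pending; };

    static size_t rs_pop_limit = 2048;   // stand-in for G1RSPopLimit

    // Mirrors popular_region_to_evac(): the limit may have grown since the
    // region was scheduled, so recheck and unschedule if it no longer trips.
    bool should_evacuate(ToyRegion& r) {
      if (r.rs_occupied < rs_pop_limit) { r.pending = false; return false; }
      return true;
    }

    // Mirrors popularity_pause_preamble()'s failure path: no object reached
    // G1ObjPopLimit, so back off by doubling the remembered-set trigger.
    void on_failed_pop_pause() {
      if (rs_pop_limit < (1u << 30)) rs_pop_limit *= 2;
    }
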
@@ -5845,7 +5396,6 @@ bool G1CollectedHeap::regions_accounted_for() {
}

bool G1CollectedHeap::print_region_accounting_info() {
gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions);
gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
free_regions(),
count_free_regions(), count_free_regions_list(),

@@ -29,7 +29,6 @@

class HeapRegion;
class HeapRegionSeq;
class HeapRegionList;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
@@ -143,7 +142,6 @@ class G1CollectedHeap : public SharedHeap {
friend class VM_GenCollectForPermanentAllocation;
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
friend class VM_G1PopRegionCollectionPause;
friend class VMStructs;

// Closures used in implementation.
@@ -253,10 +251,6 @@ private:
// than the current allocation region.
size_t _summary_bytes_used;

// Summary information about popular objects; method to print it.
NumberSeq _pop_obj_rc_at_copy;
void print_popularity_summary_info() const;

// This is used for a quick test on whether a reference points into
// the collection set or not. Basically, we have an array, with one
// byte per region, and that byte denotes whether the corresponding
@@ -447,10 +441,8 @@ protected:
virtual void do_collection_pause();

// The guts of the incremental collection pause, executed by the vm
// thread. If "popular_region" is non-NULL, this pause should evacuate
// this single region whose remembered set has gotten large, moving
// any popular objects to one of the popular regions.
virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region);
// thread.
virtual void do_collection_pause_at_safepoint();

// Actually do the work of evacuating the collection set.
virtual void evacuate_collection_set();
@@ -625,67 +617,10 @@ protected:

SubTasksDone* _process_strong_tasks;

// Allocate space to hold a popular object. Result is guaranteed below
// "popular_object_boundary()". Note: CURRENTLY halts the system if we
// run out of space to hold popular objects.
HeapWord* allocate_popular_object(size_t word_size);

// The boundary between popular and non-popular objects.
HeapWord* _popular_object_boundary;

HeapRegionList* _popular_regions_to_be_evacuated;

// Compute which objects in "single_region" are popular. If any are,
// evacuate them to a popular region, leaving behind forwarding pointers,
// and select "popular_region" as the single collection set region.
// Otherwise, leave the collection set null.
void popularity_pause_preamble(HeapRegion* populer_region);

// Compute which objects in "single_region" are popular, and evacuate
// them to a popular region, leaving behind forwarding pointers.
// Returns "true" if at least one popular object is discovered and
// evacuated. In any case, "*max_rc" is set to the maximum reference
// count of an object in the region.
bool compute_reference_counts_and_evac_popular(HeapRegion* populer_region,
size_t* max_rc);
// Subroutines used in the above.
bool _rc_region_above;
size_t _rc_region_diff;
jint* obj_rc_addr(oop obj) {
uintptr_t obj_addr = (uintptr_t)obj;
if (_rc_region_above) {
jint* res = (jint*)(obj_addr + _rc_region_diff);
assert((uintptr_t)res > obj_addr, "RC region is above.");
return res;
} else {
jint* res = (jint*)(obj_addr - _rc_region_diff);
assert((uintptr_t)res < obj_addr, "RC region is below.");
return res;
}
}
jint obj_rc(oop obj) {
return *obj_rc_addr(obj);
}
void inc_obj_rc(oop obj) {
(*obj_rc_addr(obj))++;
}
void atomic_inc_obj_rc(oop obj);


// Number of popular objects and bytes (latter is cheaper!).
size_t pop_object_used_objs();
size_t pop_object_used_bytes();

// Index of the popular region in which allocation is currently being
// done.
int _cur_pop_hr_index;

// List of regions which require zero filling.
UncleanRegionList _unclean_region_list;
bool _unclean_regions_coming;

bool check_age_cohort_well_formed_work(int a, HeapRegion* hr);

public:
void set_refine_cte_cl_concurrency(bool concurrent);

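Note: obj_rc_addr(), deleted above, is a compact side-table trick worth recording: the scratch "RC region" is exactly one region in size and runs parallel to the region being scanned, so an object's reference-count slot sits at the same offset inside the table as the object does inside its region, and one add or subtract of the fixed distance (_rc_region_diff) finds it. A self-contained sketch of the idea (toy types, not the HotSpot declarations):

    #include <cstdint>

    struct RcSideTable {
      bool      above;  // table allocated at higher addresses than the region?
      uintptr_t diff;   // |table_base - region_base| in bytes

      int32_t* slot(const void* obj) const {
        uintptr_t a = (uintptr_t)obj;
        // No division, no hash lookup: the counter sits at the same relative
        // position in the table, one fixed offset away.
        return (int32_t*)(above ? a + diff : a - diff);
      }
    };
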
@@ -930,14 +865,25 @@ public:

// Iterate over all the ref-containing fields of all objects, calling
// "cl.do_oop" on each.
virtual void oop_iterate(OopClosure* cl);
virtual void oop_iterate(OopClosure* cl) {
oop_iterate(cl, true);
}
void oop_iterate(OopClosure* cl, bool do_perm);

// Same as above, restricted to a memory region.
virtual void oop_iterate(MemRegion mr, OopClosure* cl);
virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
oop_iterate(mr, cl, true);
}
void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);

// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);
virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
virtual void object_iterate(ObjectClosure* cl) {
object_iterate(cl, true);
}
virtual void safe_object_iterate(ObjectClosure* cl) {
object_iterate(cl, true);
}
void object_iterate(ObjectClosure* cl, bool do_perm);

// Iterate over all objects allocated since the last collection, calling
// "cl.do_object" on each. The heap must have been initialized properly
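Note: the shape of this declaration change is a standard C++ idiom: the virtual, parameterless signature that existing callers and subclasses use survives unchanged and simply forwards to a new non-virtual overload carrying the do_perm flag, with the old behavior (true) forwarded explicitly rather than supplied as a default argument (default arguments on virtual functions are bound statically and are easy to get wrong in overrides). A sketch of the pattern in isolation, not the G1 headers:

    struct ObjectClosureT { virtual void do_object(void* obj) = 0;
                            virtual ~ObjectClosureT() {} };

    struct HeapT {
      // Old interface: still virtual, still parameterless.
      virtual void object_iterate(ObjectClosureT* cl) { object_iterate(cl, true); }
      // New entry point: callers may skip the perm gen explicitly.
      void object_iterate(ObjectClosureT* cl, bool do_perm) {
        (void)cl; (void)do_perm;  // walk the heap; then perm gen if do_perm
      }
      virtual ~HeapT() {}
    };
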
@@ -1066,21 +1012,6 @@ public:
// words.
virtual size_t large_typearray_limit();

// All popular objects are guaranteed to have addresses below this
// boundary.
HeapWord* popular_object_boundary() {
return _popular_object_boundary;
}

// Declare the region as one that should be evacuated because its
// remembered set is too large.
void schedule_popular_region_evac(HeapRegion* r);
// If there is a popular region to evacuate it, remove it from the list
// and return it.
HeapRegion* popular_region_to_evac();
// Evacuate the given popular region.
void evac_popular_region(HeapRegion* r);

// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
return word_size >= VeryLargeInWords;

@@ -91,10 +91,8 @@ G1CollectorPolicy::G1CollectorPolicy() :

_all_mod_union_times_ms(new NumberSeq()),

_non_pop_summary(new NonPopSummary()),
_pop_summary(new PopSummary()),
_non_pop_abandoned_summary(new NonPopAbandonedSummary()),
_pop_abandoned_summary(new PopAbandonedSummary()),
_summary(new Summary()),
_abandoned_summary(new AbandonedSummary()),

_cur_clear_ct_time_ms(0.0),

@@ -109,9 +107,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_cur_aux_times_ms(new double[_aux_num]),
_cur_aux_times_set(new bool[_aux_num]),

_pop_compute_rc_start(0.0),
_pop_evac_start(0.0),

_concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -224,16 +219,6 @@ G1CollectorPolicy::G1CollectorPolicy() :

_par_last_termination_times_ms = new double[_parallel_gc_threads];

// we store the data from the first pass during popularity pauses
_pop_par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
_pop_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_pop_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

_pop_par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
_pop_par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

_pop_par_last_closure_app_times_ms = new double[_parallel_gc_threads];

// start conservatively
_expensive_region_limit_ms = 0.5 * (double) G1MaxPauseTimeMS;

@@ -1047,23 +1032,6 @@ void G1CollectorPolicy::record_full_collection_end() {
calculate_young_list_target_config();
}

void G1CollectorPolicy::record_pop_compute_rc_start() {
_pop_compute_rc_start = os::elapsedTime();
}
void G1CollectorPolicy::record_pop_compute_rc_end() {
double ms = (os::elapsedTime() - _pop_compute_rc_start)*1000.0;
_cur_popular_compute_rc_time_ms = ms;
_pop_compute_rc_start = 0.0;
}
void G1CollectorPolicy::record_pop_evac_start() {
_pop_evac_start = os::elapsedTime();
}
void G1CollectorPolicy::record_pop_evac_end() {
double ms = (os::elapsedTime() - _pop_evac_start)*1000.0;
_cur_popular_evac_time_ms = ms;
_pop_evac_start = 0.0;
}

void G1CollectorPolicy::record_before_bytes(size_t bytes) {
_bytes_in_to_space_before_gc += bytes;
}
@@ -1120,13 +1088,6 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_par_last_scan_new_refs_times_ms[i] = -666.0;
_par_last_obj_copy_times_ms[i] = -666.0;
_par_last_termination_times_ms[i] = -666.0;

_pop_par_last_update_rs_start_times_ms[i] = -666.0;
_pop_par_last_update_rs_times_ms[i] = -666.0;
_pop_par_last_update_rs_processed_buffers[i] = -666.0;
_pop_par_last_scan_rs_start_times_ms[i] = -666.0;
_pop_par_last_scan_rs_times_ms[i] = -666.0;
_pop_par_last_closure_app_times_ms[i] = -666.0;
}
#endif

@@ -1185,25 +1146,6 @@ void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) {
guarantee( false, "we should never reach here" );
}

void G1CollectorPolicy::record_popular_pause_preamble_start() {
_cur_popular_preamble_start_ms = os::elapsedTime() * 1000.0;
}

void G1CollectorPolicy::record_popular_pause_preamble_end() {
_cur_popular_preamble_time_ms =
(os::elapsedTime() * 1000.0) - _cur_popular_preamble_start_ms;

// copy the recorded statistics of the first pass to temporary arrays
for (int i = 0; i < _parallel_gc_threads; ++i) {
_pop_par_last_update_rs_start_times_ms[i] = _par_last_update_rs_start_times_ms[i];
_pop_par_last_update_rs_times_ms[i] = _par_last_update_rs_times_ms[i];
_pop_par_last_update_rs_processed_buffers[i] = _par_last_update_rs_processed_buffers[i];
_pop_par_last_scan_rs_start_times_ms[i] = _par_last_scan_rs_start_times_ms[i];
_pop_par_last_scan_rs_times_ms[i] = _par_last_scan_rs_times_ms[i];
_pop_par_last_closure_app_times_ms[i] = _par_last_obj_copy_times_ms[i];
}
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
_mark_closure_time_ms = mark_closure_time_ms;
}
@@ -1465,8 +1407,7 @@ double G1CollectorPolicy::max_sum (double* data1,
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(bool popular,
bool abandoned) {
void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
double end_time_sec = os::elapsedTime();
double elapsed_ms = _last_pause_time_ms;
bool parallel = ParallelGCThreads > 0;
@@ -1587,42 +1528,10 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,
}

PauseSummary* summary;
if (!abandoned && !popular)
summary = _non_pop_summary;
else if (!abandoned && popular)
summary = _pop_summary;
else if (abandoned && !popular)
summary = _non_pop_abandoned_summary;
else if (abandoned && popular)
summary = _pop_abandoned_summary;
else
guarantee(false, "should not get here!");

double pop_update_rs_time;
double pop_update_rs_processed_buffers;
double pop_scan_rs_time;
double pop_closure_app_time;
double pop_other_time;

if (popular) {
PopPreambleSummary* preamble_summary = summary->pop_preamble_summary();
guarantee(preamble_summary != NULL, "should not be null!");

pop_update_rs_time = avg_value(_pop_par_last_update_rs_times_ms);
pop_update_rs_processed_buffers =
sum_of_values(_pop_par_last_update_rs_processed_buffers);
pop_scan_rs_time = avg_value(_pop_par_last_scan_rs_times_ms);
pop_closure_app_time = avg_value(_pop_par_last_closure_app_times_ms);
pop_other_time = _cur_popular_preamble_time_ms -
(pop_update_rs_time + pop_scan_rs_time + pop_closure_app_time +
_cur_popular_evac_time_ms);

preamble_summary->record_pop_preamble_time_ms(_cur_popular_preamble_time_ms);
preamble_summary->record_pop_update_rs_time_ms(pop_update_rs_time);
preamble_summary->record_pop_scan_rs_time_ms(pop_scan_rs_time);
preamble_summary->record_pop_closure_app_time_ms(pop_closure_app_time);
preamble_summary->record_pop_evacuation_time_ms(_cur_popular_evac_time_ms);
preamble_summary->record_pop_other_time_ms(pop_other_time);
if (abandoned) {
summary = _abandoned_summary;
} else {
summary = _summary;
}

double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
@@ -1694,8 +1603,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,
}

double other_time_ms = elapsed_ms;
if (popular)
other_time_ms -= _cur_popular_preamble_time_ms;

if (!abandoned) {
if (_satb_drain_time_set)
@@ -1712,41 +1619,24 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,

if (PrintGCDetails) {
gclog_or_tty->print_cr("%s%s, %1.8lf secs]",
(popular && !abandoned) ? " (popular)" :
(!popular && abandoned) ? " (abandoned)" :
(popular && abandoned) ? " (popular/abandoned)" : "",
abandoned ? " (abandoned)" : "",
(last_pause_included_initial_mark) ? " (initial-mark)" : "",
elapsed_ms / 1000.0);

if (!abandoned) {
if (_satb_drain_time_set)
if (_satb_drain_time_set) {
print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
if (_last_satb_drain_processed_buffers >= 0)
}
if (_last_satb_drain_processed_buffers >= 0) {
print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
}
if (popular)
print_stats(1, "Popularity Preamble", _cur_popular_preamble_time_ms);
if (parallel) {
if (popular) {
print_par_stats(2, "Update RS (Start)", _pop_par_last_update_rs_start_times_ms, false);
print_par_stats(2, "Update RS", _pop_par_last_update_rs_times_ms);
}
if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
if (G1RSBarrierUseQueue)
print_par_buffers(3, "Processed Buffers",
_pop_par_last_update_rs_processed_buffers, true);
print_par_stats(2, "Scan RS", _pop_par_last_scan_rs_times_ms);
print_par_stats(2, "Closure app", _pop_par_last_closure_app_times_ms);
print_stats(2, "Evacuation", _cur_popular_evac_time_ms);
print_stats(2, "Other", pop_other_time);
}
if (!abandoned) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
if (!popular) {
print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
if (G1RSBarrierUseQueue)
print_par_buffers(3, "Processed Buffers",
_par_last_update_rs_processed_buffers, true);
}
_par_last_update_rs_processed_buffers, true);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms);
@@ -1757,25 +1647,11 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,
print_par_stats(2, "Termination", _par_last_termination_times_ms);
print_stats(2, "Other", parallel_other_time);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
}
} else {
if (popular) {
print_stats(2, "Update RS", pop_update_rs_time);
} else {
print_stats(1, "Update RS", update_rs_time);
if (G1RSBarrierUseQueue)
print_stats(3, "Processed Buffers",
(int)pop_update_rs_processed_buffers);
print_stats(2, "Scan RS", pop_scan_rs_time);
print_stats(2, "Closure App", pop_closure_app_time);
print_stats(2, "Evacuation", _cur_popular_evac_time_ms);
print_stats(2, "Other", pop_other_time);
}
if (!abandoned) {
if (!popular) {
print_stats(1, "Update RS", update_rs_time);
if (G1RSBarrierUseQueue)
print_stats(2, "Processed Buffers",
(int)update_rs_processed_buffers);
}
print_stats(2, "Processed Buffers",
(int)update_rs_processed_buffers);
print_stats(1, "Ext Root Scanning", ext_root_scan_time);
print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
print_stats(1, "Scan-Only Scanning", scan_only_time);
@@ -1855,7 +1731,7 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,

// <NEW PREDICTION>

if (!popular && update_stats) {
if (update_stats) {
double pause_time_ms = elapsed_ms;

size_t diff = 0;
@@ -2454,36 +2330,8 @@ void G1CollectorPolicy::check_other_times(int level,
void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
bool parallel = ParallelGCThreads > 0;
MainBodySummary* body_summary = summary->main_body_summary();
PopPreambleSummary* preamble_summary = summary->pop_preamble_summary();

if (summary->get_total_seq()->num() > 0) {
print_summary_sd(0,
(preamble_summary == NULL) ? "Non-Popular Pauses" :
"Popular Pauses",
summary->get_total_seq());
if (preamble_summary != NULL) {
print_summary(1, "Popularity Preamble",
preamble_summary->get_pop_preamble_seq());
print_summary(2, "Update RS", preamble_summary->get_pop_update_rs_seq());
print_summary(2, "Scan RS", preamble_summary->get_pop_scan_rs_seq());
print_summary(2, "Closure App",
preamble_summary->get_pop_closure_app_seq());
print_summary(2, "Evacuation",
preamble_summary->get_pop_evacuation_seq());
print_summary(2, "Other", preamble_summary->get_pop_other_seq());
{
NumberSeq* other_parts[] = {
preamble_summary->get_pop_update_rs_seq(),
preamble_summary->get_pop_scan_rs_seq(),
preamble_summary->get_pop_closure_app_seq(),
preamble_summary->get_pop_evacuation_seq()
};
NumberSeq calc_other_times_ms(preamble_summary->get_pop_preamble_seq(),
4, other_parts);
check_other_times(2, preamble_summary->get_pop_other_seq(),
&calc_other_times_ms);
}
}
print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
if (body_summary != NULL) {
print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
if (parallel) {
@@ -2537,19 +2385,15 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
// parallel
NumberSeq* other_parts[] = {
body_summary->get_satb_drain_seq(),
(preamble_summary == NULL) ? NULL :
preamble_summary->get_pop_preamble_seq(),
body_summary->get_parallel_seq(),
body_summary->get_clear_ct_seq()
};
calc_other_times_ms = NumberSeq (summary->get_total_seq(),
4, other_parts);
calc_other_times_ms = NumberSeq(summary->get_total_seq(),
3, other_parts);
} else {
// serial
NumberSeq* other_parts[] = {
body_summary->get_satb_drain_seq(),
(preamble_summary == NULL) ? NULL :
preamble_summary->get_pop_preamble_seq(),
body_summary->get_update_rs_seq(),
body_summary->get_ext_root_scan_seq(),
body_summary->get_mark_stack_scan_seq(),
@@ -2558,16 +2402,11 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
body_summary->get_obj_copy_seq()
};
calc_other_times_ms = NumberSeq(summary->get_total_seq(),
8, other_parts);
7, other_parts);
}
} else {
// abandoned
NumberSeq* other_parts[] = {
(preamble_summary == NULL) ? NULL :
preamble_summary->get_pop_preamble_seq()
};
calc_other_times_ms = NumberSeq(summary->get_total_seq(),
1, other_parts);
calc_other_times_ms = NumberSeq();
}
check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
}
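Note: the count literals edited here (4 -> 3 in the parallel arm, 8 -> 7 in the serial arm, and the abandoned arm dropping to an empty NumberSeq) track the other_parts arrays just above them: with the pop-preamble sequence removed, each array holds one fewer entry, and the NumberSeq constructor that derives the "Other" time needs the matching length. The derivation it performs is, in spirit (a sketch of the semantics, not the NumberSeq API):

    // other[i] = total[i] minus the sum of the named component sequences;
    // the count must equal the array length or the loop reads past the end.
    double other_time(double total, const double* parts, int n) {
      double sum = 0.0;
      for (int i = 0; i < n; i++) sum += parts[i];
      return total - sum;
    }
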
@@ -2579,18 +2418,12 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
}

void
G1CollectorPolicy::print_abandoned_summary(PauseSummary* non_pop_summary,
PauseSummary* pop_summary) const {
G1CollectorPolicy::print_abandoned_summary(PauseSummary* summary) const {
bool printed = false;
if (non_pop_summary->get_total_seq()->num() > 0) {
if (summary->get_total_seq()->num() > 0) {
printed = true;
print_summary(non_pop_summary);
print_summary(summary);
}
if (pop_summary->get_total_seq()->num() > 0) {
printed = true;
print_summary(pop_summary);
}

if (!printed) {
print_indent(0);
gclog_or_tty->print_cr("none");
@@ -2608,15 +2441,11 @@ void G1CollectorPolicy::print_tracing_info() const {
gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
gclog_or_tty->print_cr("");

gclog_or_tty->print_cr("NON-POPULAR PAUSES");
print_summary(_non_pop_summary);

gclog_or_tty->print_cr("POPULAR PAUSES");
print_summary(_pop_summary);
gclog_or_tty->print_cr("EVACUATION PAUSES");
print_summary(_summary);

gclog_or_tty->print_cr("ABANDONED PAUSES");
print_abandoned_summary(_non_pop_abandoned_summary,
_pop_abandoned_summary);
print_abandoned_summary(_abandoned_summary);

gclog_or_tty->print_cr("MISC");
print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
@@ -2702,14 +2531,6 @@ void G1CollectorPolicy::update_conc_refine_data() {
_conc_refine_enabled++;
}

void G1CollectorPolicy::set_single_region_collection_set(HeapRegion* hr) {
assert(collection_set() == NULL, "Must be no current CS.");
_collection_set_size = 0;
_collection_set_bytes_used_before = 0;
add_to_collection_set(hr);
count_CS_bytes_used();
}

bool
G1CollectorPolicy::should_add_next_region_to_young_list() {
assert(in_young_gc_mode(), "should be in young GC mode");
@@ -2787,15 +2608,6 @@ void G1CollectorPolicy::calculate_survivors_policy()
}
}


void
G1CollectorPolicy_BestRegionsFirst::
set_single_region_collection_set(HeapRegion* hr) {
G1CollectorPolicy::set_single_region_collection_set(hr);
_collectionSetChooser->removeRegion(hr);
}


bool
G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
word_size) {
@@ -3061,19 +2873,13 @@ add_to_collection_set(HeapRegion* hr) {

void
G1CollectorPolicy_BestRegionsFirst::
choose_collection_set(HeapRegion* pop_region) {
choose_collection_set() {
double non_young_start_time_sec;
start_recording_regions();

if (pop_region != NULL) {
_target_pause_time_ms = (double) G1MaxPauseTimeMS;
} else {
guarantee(_target_pause_time_ms > -1.0,
"_target_pause_time_ms should have been set!");
}

// pop region is either null (and so is CS), or else it *is* the CS.
assert(_collection_set == pop_region, "Precondition");
guarantee(_target_pause_time_ms > -1.0,
"_target_pause_time_ms should have been set!");
assert(_collection_set == NULL, "Precondition");

double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
double predicted_pause_time_ms = base_time_ms;
@ -3100,15 +2906,13 @@ choose_collection_set(HeapRegion* pop_region) {
|
||||
size_t expansion_bytes =
|
||||
_g1->expansion_regions() * HeapRegion::GrainBytes;
|
||||
|
||||
if (pop_region == NULL) {
|
||||
_collection_set_bytes_used_before = 0;
|
||||
_collection_set_size = 0;
|
||||
}
|
||||
_collection_set_bytes_used_before = 0;
|
||||
_collection_set_size = 0;
|
||||
|
||||
// Adjust for expansion and slop.
|
||||
max_live_bytes = max_live_bytes + expansion_bytes;
|
||||
|
||||
assert(pop_region != NULL || _g1->regions_accounted_for(), "Region leakage!");
|
||||
assert(_g1->regions_accounted_for(), "Region leakage!");
|
||||
|
||||
HeapRegion* hr;
|
||||
if (in_young_gc_mode()) {
|
||||
@ -3135,14 +2939,9 @@ choose_collection_set(HeapRegion* pop_region) {
|
||||
double predicted_time_ms = predict_region_elapsed_time_ms(hr, true);
|
||||
time_remaining_ms -= predicted_time_ms;
|
||||
predicted_pause_time_ms += predicted_time_ms;
|
||||
if (hr == pop_region) {
|
||||
// The popular region was young. Skip over it.
|
||||
assert(hr->in_collection_set(), "It's the pop region.");
|
||||
} else {
|
||||
assert(!hr->in_collection_set(), "It's not the pop region.");
|
||||
add_to_collection_set(hr);
|
||||
record_cset_region(hr, true);
|
||||
}
|
||||
assert(!hr->in_collection_set(), "invariant");
|
||||
add_to_collection_set(hr);
|
||||
record_cset_region(hr, true);
|
||||
max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
|
||||
if (G1PolicyVerbose > 0) {
|
||||
gclog_or_tty->print_cr(" Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.",
|
||||
@ -3165,10 +2964,6 @@ choose_collection_set(HeapRegion* pop_region) {
|
||||
// don't bother adding more regions...
|
||||
goto choose_collection_set_end;
|
||||
}
|
||||
} else if (pop_region != NULL) {
|
||||
// We're not in young mode, and we chose a popular region; don't choose
|
||||
// any more.
|
||||
return;
|
||||
}
|
||||
|
||||
if (!in_young_gc_mode() || !full_young_gcs()) {
|
||||
@ -3178,7 +2973,7 @@ choose_collection_set(HeapRegion* pop_region) {
|
||||
do {
|
||||
hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
|
||||
avg_prediction);
|
||||
if (hr != NULL && !hr->popular()) {
|
||||
if (hr != NULL) {
|
||||
double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
|
||||
time_remaining_ms -= predicted_time_ms;
|
||||
predicted_pause_time_ms += predicted_time_ms;
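
To make the selection arithmetic in the hunks above easier to follow, here is a minimal, self-contained sketch of the same prediction-driven loop: keep adding candidate regions while their predicted evacuation time still fits the remaining pause budget. All types, names, and numbers below are illustrative stand-ins, not the HotSpot declarations.

#include <cstdio>
#include <vector>

// Stand-in for a heap region with a predicted evacuation cost.
struct Region { double predicted_ms; };

int main() {
  std::vector<Region> candidates = { {2.5}, {1.0}, {4.0}, {0.5} };
  double time_remaining_ms = 5.0;   // analogous to the pause-time budget
  double predicted_pause_ms = 0.0;
  size_t cset_size = 0;
  for (const Region& r : candidates) {
    if (r.predicted_ms > time_remaining_ms) break; // budget exhausted
    time_remaining_ms  -= r.predicted_ms;          // time_remaining_ms -= predicted_time_ms
    predicted_pause_ms += r.predicted_ms;          // predicted_pause_time_ms += predicted_time_ms
    cset_size++;                                   // add_to_collection_set(hr)
  }
  std::printf("chose %zu regions, predicted pause %.1f ms\n",
              cset_size, predicted_pause_ms);
  return 0;
}
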
@ -3225,8 +3020,8 @@ expand_if_possible(size_t numRegions) {
}

void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end(bool popular, bool abandoned) {
G1CollectorPolicy::record_collection_pause_end(popular, abandoned);
record_collection_pause_end(bool abandoned) {
G1CollectorPolicy::record_collection_pause_end(abandoned);
assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}

@ -47,7 +47,6 @@ public: \
}

class MainBodySummary;
class PopPreambleSummary;

class PauseSummary: public CHeapObj {
define_num_seq(total)
@ -55,7 +54,6 @@ class PauseSummary: public CHeapObj {

public:
virtual MainBodySummary* main_body_summary() { return NULL; }
virtual PopPreambleSummary* pop_preamble_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
@ -75,36 +73,13 @@ class MainBodySummary: public CHeapObj {
define_num_seq(clear_ct) // parallel only
};

class PopPreambleSummary: public CHeapObj {
define_num_seq(pop_preamble)
define_num_seq(pop_update_rs)
define_num_seq(pop_scan_rs)
define_num_seq(pop_closure_app)
define_num_seq(pop_evacuation)
define_num_seq(pop_other)
};

class NonPopSummary: public PauseSummary,
public MainBodySummary {
class Summary: public PauseSummary,
public MainBodySummary {
public:
virtual MainBodySummary* main_body_summary() { return this; }
};

class PopSummary: public PauseSummary,
public MainBodySummary,
public PopPreambleSummary {
public:
virtual MainBodySummary* main_body_summary() { return this; }
virtual PopPreambleSummary* pop_preamble_summary() { return this; }
};

class NonPopAbandonedSummary: public PauseSummary {
};

class PopAbandonedSummary: public PauseSummary,
public PopPreambleSummary {
public:
virtual PopPreambleSummary* pop_preamble_summary() { return this; }
class AbandonedSummary: public PauseSummary {
};
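
The class shuffle in the hunks above is easier to see in isolation: PauseSummary stays the base, Summary (formerly NonPopSummary) mixes in MainBodySummary, and AbandonedSummary carries no main body at all. A hypothetical standalone model of the resulting hierarchy, not the HotSpot declarations themselves:

#include <cstdio>

struct MainBodySummary { /* per-phase sequences would live here */ };

struct PauseSummary {
  virtual MainBodySummary* main_body_summary() { return 0; }
  virtual ~PauseSummary() {}
};

// Evacuation pauses have a main body; abandoned pauses do not.
struct Summary : public PauseSummary, public MainBodySummary {
  virtual MainBodySummary* main_body_summary() { return this; }
};

struct AbandonedSummary : public PauseSummary {};

int main() {
  Summary s;
  AbandonedSummary a;
  std::printf("summary has body: %d, abandoned has body: %d\n",
              s.main_body_summary() != 0, a.main_body_summary() != 0);
  return 0;
}
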

class G1CollectorPolicy: public CollectorPolicy {
@ -146,10 +121,6 @@ protected:
double _cur_satb_drain_time_ms;
double _cur_clear_ct_time_ms;
bool _satb_drain_time_set;
double _cur_popular_preamble_start_ms;
double _cur_popular_preamble_time_ms;
double _cur_popular_compute_rc_time_ms;
double _cur_popular_evac_time_ms;

double _cur_CH_strong_roots_end_sec;
double _cur_CH_strong_roots_dur_ms;
@ -173,10 +144,8 @@ protected:
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;

NonPopSummary* _non_pop_summary;
PopSummary* _pop_summary;
NonPopAbandonedSummary* _non_pop_abandoned_summary;
PopAbandonedSummary* _pop_abandoned_summary;
Summary* _summary;
AbandonedSummary* _abandoned_summary;

NumberSeq* _all_pause_times_ms;
NumberSeq* _all_full_gc_times_ms;
@ -210,18 +179,6 @@ protected:
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;

// there are two passes during popular pauses, so we need to store
// somewhere the results of the first pass
double* _pop_par_last_update_rs_start_times_ms;
double* _pop_par_last_update_rs_times_ms;
double* _pop_par_last_update_rs_processed_buffers;
double* _pop_par_last_scan_rs_start_times_ms;
double* _pop_par_last_scan_rs_times_ms;
double* _pop_par_last_closure_app_times_ms;

double _pop_compute_rc_start;
double _pop_evac_start;

// indicates that we are in young GC mode
bool _in_young_gc_mode;

@ -634,8 +591,7 @@ protected:
NumberSeq* calc_other_times_ms) const;

void print_summary (PauseSummary* stats) const;
void print_abandoned_summary(PauseSummary* non_pop_summary,
PauseSummary* pop_summary) const;
void print_abandoned_summary(PauseSummary* summary) const;

void print_summary (int level, const char* str, NumberSeq* seq) const;
void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
@ -856,9 +812,6 @@ public:
virtual void record_collection_pause_start(double start_time_sec,
size_t start_used);

virtual void record_popular_pause_preamble_start();
virtual void record_popular_pause_preamble_end();

// Must currently be called while the world is stopped.
virtual void record_concurrent_mark_init_start();
virtual void record_concurrent_mark_init_end();
@ -881,7 +834,7 @@ public:
virtual void record_collection_pause_end_CH_strong_roots();
virtual void record_collection_pause_end_G1_strong_roots();

virtual void record_collection_pause_end(bool popular, bool abandoned);
virtual void record_collection_pause_end(bool abandoned);

// Record the fact that a full collection occurred.
virtual void record_full_collection_start();
@ -990,12 +943,6 @@ public:
_cur_aux_times_ms[i] += ms;
}

void record_pop_compute_rc_start();
void record_pop_compute_rc_end();

void record_pop_evac_start();
void record_pop_evac_end();

// Record the fact that "bytes" bytes were allocated in a region.
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);
@ -1008,9 +955,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
// If "pop_region" is non-NULL, it is a popular region that has already
// been added to the collection set.
virtual void choose_collection_set(HeapRegion* pop_region = NULL) = 0;
virtual void choose_collection_set() = 0;

void clear_collection_set() { _collection_set = NULL; }

@ -1018,9 +963,6 @@ public:
// current collection set.
HeapRegion* collection_set() { return _collection_set; }

// Sets the collection set to the given single region.
virtual void set_single_region_collection_set(HeapRegion* hr);

// The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; }

@ -1203,7 +1145,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
// If the estimated is less than desirable, resize if possible.
void expand_if_possible(size_t numRegions);

virtual void choose_collection_set(HeapRegion* pop_region = NULL);
virtual void choose_collection_set();
virtual void record_collection_pause_start(double start_time_sec,
size_t start_used);
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
@ -1214,9 +1156,8 @@ public:
G1CollectorPolicy_BestRegionsFirst() {
_collectionSetChooser = new CollectionSetChooser();
}
void record_collection_pause_end(bool popular, bool abandoned);
void record_collection_pause_end(bool abandoned);
bool should_do_collection_pause(size_t word_size);
virtual void set_single_region_collection_set(HeapRegion* hr);
// This is not needed any more, after the CSet choosing code was
// changed to use the pause prediction work. But let's leave the
// hook in just in case.

@ -157,7 +157,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
class G1PrepareCompactClosure: public HeapRegionClosure {
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
bool _popular_only;

void free_humongous_region(HeapRegion* hr) {
HeapWord* bot = hr->bottom();
@ -172,17 +171,11 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
}

public:
G1PrepareCompactClosure(CompactibleSpace* cs, bool popular_only) :
G1PrepareCompactClosure(CompactibleSpace* cs) :
_cp(NULL, cs, cs->initialize_threshold()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_popular_only(popular_only)
_mrbs(G1CollectedHeap::heap()->mr_bs())
{}
bool doHeapRegion(HeapRegion* hr) {
if (_popular_only && !hr->popular())
return true; // terminate early
else if (!_popular_only && hr->popular())
return false; // skip this one.

if (hr->isHumongous()) {
if (hr->startsHumongous()) {
oop obj = oop(hr->bottom());
@ -203,20 +196,15 @@ public:
return false;
}
};
// Stolen verbatim from g1CollectedHeap.cpp

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
HeapRegion* _a_region;
bool _find_popular;
public:
FindFirstRegionClosure(bool find_popular) :
_a_region(NULL), _find_popular(find_popular) {}
FindFirstRegionClosure() : _a_region(NULL) {}
bool doHeapRegion(HeapRegion* r) {
if (r->popular() == _find_popular) {
_a_region = r;
return true;
} else {
return false;
}
_a_region = r;
return true;
}
HeapRegion* result() { return _a_region; }
};
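
With the popularity filter gone, FindFirstRegionClosure above reduces to "stop at the first region visited". A self-contained model of that iterate-with-closure pattern, with simplified stand-in types:

#include <cstdio>
#include <vector>

struct HeapRegion { int id; };

// Returning true from doHeapRegion terminates the iteration.
struct FindFirstRegionClosure {
  HeapRegion* _a_region;
  FindFirstRegionClosure() : _a_region(0) {}
  bool doHeapRegion(HeapRegion* r) { _a_region = r; return true; }
  HeapRegion* result() { return _a_region; }
};

int main() {
  std::vector<HeapRegion> regions = { {0}, {1}, {2} };
  FindFirstRegionClosure cl;
  for (size_t i = 0; i < regions.size(); i++)   // heap_region_iterate(&cl)
    if (cl.doHeapRegion(&regions[i])) break;
  std::printf("first region: %d\n", cl.result()->id);
  return 0;
}
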
@ -242,30 +230,15 @@ void G1MarkSweep::mark_sweep_phase2() {
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");

// First we compact the popular regions.
if (G1NumPopularRegions > 0) {
CompactibleSpace* sp = g1h->first_compactible_space();
FindFirstRegionClosure cl(true /*find_popular*/);
g1h->heap_region_iterate(&cl);
HeapRegion *r = cl.result();
assert(r->popular(), "should have found a popular region.");
assert(r == sp, "first popular heap region should "
"== first compactible space");
G1PrepareCompactClosure blk(sp, true/*popular_only*/);
g1h->heap_region_iterate(&blk);
}

// Now we do the regular regions.
FindFirstRegionClosure cl(false /*find_popular*/);
FindFirstRegionClosure cl;
g1h->heap_region_iterate(&cl);
HeapRegion *r = cl.result();
assert(!r->popular(), "should have found a non-popular region.");
CompactibleSpace* sp = r;
if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
sp = r->next_compaction_space();
}

G1PrepareCompactClosure blk(sp, false/*popular_only*/);
G1PrepareCompactClosure blk(sp);
g1h->heap_region_iterate(&blk);

CompactPoint perm_cp(pg, NULL, NULL);

@ -580,9 +580,7 @@ public:
virtual void do_oop(oop* p) {
HeapRegion* to = _g1->heap_region_containing(*p);
if (to->in_collection_set()) {
if (to->rem_set()->add_reference(p, 0)) {
_g1->schedule_popular_region_evac(to);
}
to->rem_set()->add_reference(p, 0);
}
}
};
@ -1024,9 +1022,8 @@ void HRInto_G1RemSet::print_summary_info() {
gclog_or_tty->print_cr(" %d occupied cards represented.",
blk.occupied());
gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
" %s, cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
(blk.max_mem_sz_region()->popular() ? "POP" : ""),
(blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
(blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
gclog_or_tty->print_cr(" Did %d coarsenings.",

@ -65,7 +65,6 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
HeapRegion* to = _g1->heap_region_containing(obj);
// The test below could be optimized by applying a bit op to to and from.
if (to != NULL && from != NULL && from != to) {
bool update_delayed = false;
// There is a tricky infinite loop if we keep pushing
// self forwarding pointers onto our _new_refs list.
// The _par_traversal_in_progress flag is true during the collection pause,
@ -77,10 +76,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
update_delayed = true;
}

if (!to->popular() && !update_delayed) {
} else {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
" for region [" PTR_FORMAT ", " PTR_FORMAT ")",
@ -88,9 +84,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
to->bottom(), to->end());
#endif
assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
if (to->rem_set()->add_reference(p, tid)) {
_g1->schedule_popular_region_evac(to);
}
to->rem_set()->add_reference(p, tid);
}
}
}
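
Read together, these remset hunks leave a simpler write barrier: a reference is recorded only if it crosses regions and is not being delayed to the end of the pause, and the popularity test plus the schedule_popular_region_evac callback disappear. A standalone model of that filter, using plain pointers instead of oops and a fake region type; this is a sketch under those assumptions, not HotSpot code:

#include <cstdio>
#include <set>

struct RemSet {
  std::set<const void*> entries;
  void add_reference(const void* p) { entries.insert(p); }
};
struct HeapRegion { int id; RemSet rem_set; };

// Model of par_write_ref's filtering: only cross-region, non-delayed
// references reach the "to" region's remembered set.
void write_ref(HeapRegion* from, HeapRegion* to, const void* p, bool update_delayed) {
  if (to != 0 && from != 0 && from != to) {
    if (!update_delayed) {
      to->rem_set.add_reference(p);  // was additionally guarded by !to->popular()
    }
  }
}

int main() {
  HeapRegion a = {1}, b = {2};
  int slot;
  write_ref(&a, &b, &slot, false);  // recorded: crosses regions
  write_ref(&a, &a, &slot, false);  // skipped: intra-region
  write_ref(&a, &b, &slot, true);   // skipped: delayed until pause end
  std::printf("entries in b's remset: %zu\n", b.rem_set.entries.size()); // 1
  return 0;
}
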

@ -185,15 +185,9 @@
product(intx, G1InefficientPausePct, 80, \
"Threshold of an 'inefficient' pause (as % of cum efficiency).") \
\
product(intx, G1RSPopLimit, 32768, \
"Limit that defines popularity. Should go away! XXX") \
\
develop(bool, G1RSCountHisto, false, \
"If true, print a histogram of RS occupancies after each pause") \
\
product(intx, G1ObjPopLimit, 256, \
"Limit that defines popularity for an object.") \
\
product(bool, G1TraceFileOverwrite, false, \
"Allow the trace file to be overwritten") \
\
@ -201,16 +195,6 @@
"When > 0, print the occupancies of the <n> best and worst" \
"regions.") \
\
develop(bool, G1TracePopularity, false, \
"When true, provide detailed tracing of popularity.") \
\
product(bool, G1SummarizePopularity, false, \
"When true, provide end-of-run-summarization of popularity.") \
\
product(intx, G1NumPopularRegions, 1, \
"Number of regions reserved to hold popular objects. " \
"Should go away later.") \
\
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\

@ -104,7 +104,6 @@ public:
HeapRegion* to = _g1h->heap_region_containing(*p);
if (from != NULL && to != NULL &&
from != to &&
!to->popular() &&
!to->isHumongous()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
@ -285,8 +284,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
}
zero_marked_bytes();
set_sort_index(-1);
if ((uintptr_t)bottom() >= (uintptr_t)g1h->popular_object_boundary())
set_popular(false);

_offsets.resize(HeapRegion::GrainWords);
init_top_at_mark_start();
@ -371,7 +368,6 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
_popularity(NotPopular),
_young_type(NotYoung), _next_young_region(NULL),
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _zfs(NotZeroFilled)

@ -238,15 +238,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// See "sort_index" method. -1 means is not in the array.
int _sort_index;

// Means it has (or at least had) a very large RS, and should not be
// considered for membership in a collection set.
enum PopularityState {
NotPopular,
PopularPending,
Popular
};
PopularityState _popularity;

// <PREDICTION>
double _gc_efficiency;
// </PREDICTION>
@ -433,10 +424,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}

bool is_reserved() {
return popular();
}

bool is_on_free_list() {
return _is_on_free_list;
}
@ -609,23 +596,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
init_top_at_mark_start();
}

bool popular() { return _popularity == Popular; }
void set_popular(bool b) {
if (b) {
_popularity = Popular;
} else {
_popularity = NotPopular;
}
}
bool popular_pending() { return _popularity == PopularPending; }
void set_popular_pending(bool b) {
if (b) {
_popularity = PopularPending;
} else {
_popularity = NotPopular;
}
}
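
For reference, the popularity state machine deleted here is small: a region is NotPopular, PopularPending, or Popular, and each setter either enters its state or falls back to NotPopular. A standalone sketch of those removed semantics, with an illustrative Region stand-in:

#include <cstdio>

enum PopularityState { NotPopular, PopularPending, Popular };

struct Region {
  PopularityState _popularity;
  Region() : _popularity(NotPopular) {}
  bool popular()         { return _popularity == Popular; }
  bool popular_pending() { return _popularity == PopularPending; }
  void set_popular(bool b)         { _popularity = b ? Popular        : NotPopular; }
  void set_popular_pending(bool b) { _popularity = b ? PopularPending : NotPopular; }
};

int main() {
  Region r;
  r.set_popular_pending(true);  // large RS observed; queued for isolation
  std::printf("pending=%d popular=%d\n", r.popular_pending(), r.popular());
  r.set_popular(true);          // region now holds popular objects
  std::printf("pending=%d popular=%d\n", r.popular_pending(), r.popular());
  return 0;
}
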

// <PREDICTION>
void calc_gc_efficiency(void);
double gc_efficiency() { return _gc_efficiency;}

@ -188,32 +188,6 @@ private:
// the _outgoing_region_map.
void clear_outgoing_entries();

#if MAYBE
// Audit the given card index.
void audit_card(size_t card_num, HeapRegion* hr, u2* rc_arr,
HeapRegionRemSet* empty_cards, size_t* one_obj_cards);

// Assumes that "audit_stage1" has been called for "hr", to set up
// "shadow" and "new_rs" appropriately. Identifies individual popular
// objects; returns "true" if any are found.
bool audit_find_pop(HeapRegion* hr, u2* rc_arr);

// Assumes that "audit_stage1" has been called for "hr", to set up
// "shadow" and "new_rs" appropriately. Identifies individual popular
// objects, and determines the number of entries in "new_rs" if any such
// popular objects are ignored. If this is sufficiently small, returns
// "false" to indicate that a constraint should not be introduced.
// Otherwise, returns "true" to indicate that we should go ahead with
// adding the constraint.
bool audit_stag(HeapRegion* hr, u2* rc_arr);


u2* alloc_rc_array();

SeqHeapRegionRemSet* audit_post(u2* rc_arr, size_t multi_obj_crds,
SeqHeapRegionRemSet* empty_cards);
#endif

enum ParIterState { Unclaimed, Claimed, Complete };
ParIterState _iter_state;

@ -261,16 +235,14 @@ public:

/* Used in the sequential case. Returns "true" iff this addition causes
the size limit to be reached. */
bool add_reference(oop* from) {
void add_reference(oop* from) {
_other_regions.add_reference(from);
return false;
}

/* Used in the parallel case. Returns "true" iff this addition causes
the size limit to be reached. */
bool add_reference(oop* from, int tid) {
void add_reference(oop* from, int tid) {
_other_regions.add_reference(from, tid);
return false;
}
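
With no size-limit signal left to propagate, add_reference becomes void and callers (see the g1RemSet hunks above) stop branching on its result. A minimal model of the new shape, with an illustrative OtherRegionsTable stand-in:

#include <cstdio>
#include <set>

struct OtherRegionsTable {                    // illustrative stand-in
  std::set<const void*> refs;
  void add_reference(const void* from) { refs.insert(from); }
};

struct HeapRegionRemSet {
  OtherRegionsTable _other_regions;
  // After this change: no boolean "limit reached" result to act on.
  void add_reference(const void* from) { _other_regions.add_reference(from); }
};

int main() {
  HeapRegionRemSet rs;
  int field;
  rs.add_reference(&field);  // caller no longer tests a return value
  std::printf("recorded %zu reference(s)\n", rs._other_regions.refs.size());
  return 0;
}
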

// Records the fact that the current region contains an outgoing
@ -338,20 +310,6 @@ public:
}
void print() const;

#if MAYBE
// We are about to introduce a constraint, requiring the collection time
// of the region owning this RS to be <= "hr", and forgetting pointers
// from the owning region to "hr." Before doing so, examines this rem
// set for pointers to "hr", possibly identifying some popular objects,
// and possibly finding some cards to no longer contain pointers to "hr",
//
// These steps may prevent the constraint from being necessary; in
// which case returns a set of cards now thought to contain no pointers
// into HR. In the normal (I assume) case, returns NULL, indicating that
// we should go ahead and add the constraint.
virtual SeqHeapRegionRemSet* audit(HeapRegion* hr) = 0;
#endif

// Called during a stop-world phase to perform any deferred cleanups.
// The second version may be called by parallel threads after they finish
// collection work.

@ -74,7 +74,6 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
// [first, cur)
HeapRegion* curhr = _regions.at(cur);
if (curhr->is_empty()
&& !curhr->is_reserved()
&& (first == cur
|| (_regions.at(cur-1)->end() ==
curhr->bottom()))) {
@ -121,35 +120,27 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
}
}

void HeapRegionSeq::print_empty_runs(bool reserved_are_empty) {
void HeapRegionSeq::print_empty_runs() {
int empty_run = 0;
int n_empty = 0;
bool at_least_one_reserved = false;
int empty_run_start;
for (int i = 0; i < _regions.length(); i++) {
HeapRegion* r = _regions.at(i);
if (r->continuesHumongous()) continue;
if (r->is_empty() && (reserved_are_empty || !r->is_reserved())) {
if (r->is_empty()) {
assert(!r->isHumongous(), "H regions should not be empty.");
if (empty_run == 0) empty_run_start = i;
empty_run++;
n_empty++;
if (r->is_reserved()) {
at_least_one_reserved = true;
}
} else {
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
if (reserved_are_empty && at_least_one_reserved)
gclog_or_tty->print("(R)");
empty_run = 0;
at_least_one_reserved = false;
}
}
}
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
if (reserved_are_empty && at_least_one_reserved) gclog_or_tty->print("(R)");
}
gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
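
Minus the reserved-region bookkeeping, print_empty_runs is a plain run-length pass over the region array, emitting "start:length" for each maximal run of empty regions plus a total. The same logic, runnable against a toy emptiness array (the array contents are illustrative):

#include <cstdio>

int main() {
  bool empty[] = { true, true, false, true, false, false, true, true, true };
  int n = sizeof(empty) / sizeof(empty[0]);
  int empty_run = 0, empty_run_start = 0, n_empty = 0;
  for (int i = 0; i < n; i++) {
    if (empty[i]) {
      if (empty_run == 0) empty_run_start = i;  // a new run begins
      empty_run++;
      n_empty++;
    } else if (empty_run > 0) {
      std::printf(" %d:%d", empty_run_start, empty_run);  // flush the run
      empty_run = 0;
    }
  }
  if (empty_run > 0) std::printf(" %d:%d", empty_run_start, empty_run);
  std::printf(" [tot = %d]\n", n_empty);  // " 0:2 3:1 6:3 [tot = 6]"
  return 0;
}
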
@ -193,7 +184,6 @@ size_t HeapRegionSeq::free_suffix() {
int cur = first;
while (cur >= 0 &&
(_regions.at(cur)->is_empty()
&& !_regions.at(cur)->is_reserved()
&& (first == cur
|| (_regions.at(cur+1)->bottom() ==
_regions.at(cur)->end())))) {

@ -104,8 +104,7 @@ class HeapRegionSeq: public CHeapObj {

void print();

// Prints out runs of empty regions. If the arg is "true" reserved
// (popular) regions are considered "empty".
void print_empty_runs(bool reserved_are_empty);
// Prints out runs of empty regions.
void print_empty_runs();

};

@ -43,16 +43,9 @@ void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
g1h->do_collection_pause_at_safepoint(NULL);
g1h->do_collection_pause_at_safepoint();
}

void VM_G1PopRegionCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->do_collection_pause_at_safepoint(_pop_region);
}


void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);

@ -77,20 +77,6 @@ class VM_G1IncCollectionPause: public VM_GC_Operation {
}
};

class VM_G1PopRegionCollectionPause: public VM_GC_Operation {
HeapRegion* _pop_region;
public:
VM_G1PopRegionCollectionPause(int gc_count_before, HeapRegion* pop_region) :
VM_GC_Operation(gc_count_before),
_pop_region(pop_region)
{}
virtual VMOp_Type type() const { return VMOp_G1PopRegionCollectionPause; }
virtual void doit();
virtual const char* name() const {
return "garbage-first popular region collection pause";
}
};

// Concurrent GC stop-the-world operations such as initial and final mark;
// consider sharing these with CMS's counterparts.
class VM_CGC_Operation: public VM_Operation {

@ -36,7 +36,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
ObjToScanQueueSet* work_queue_set_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_),
_to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
@ -57,6 +57,11 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
_start = os::elapsedTime();
_old_gen_closure.set_generation(old_gen_);
_old_gen_root_closure.set_generation(old_gen_);
if (UseCompressedOops) {
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
} else {
_overflow_stack = NULL;
}
}
#ifdef _MSC_VER
#pragma warning( pop )
@ -81,7 +86,7 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
assert(old->is_objArray(), "must be obj array");
assert(old->is_forwarded(), "must be forwarded");
assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
assert(!_old_gen->is_in(old), "must be in young generation.");
assert(!old_gen()->is_in(old), "must be in young generation.");

objArrayOop obj = objArrayOop(old->forwardee());
// Process ParGCArrayScanChunk elements now
@ -119,26 +124,68 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {

void ParScanThreadState::trim_queues(int max_size) {
ObjToScanQueue* queue = work_queue();
while (queue->size() > (juint)max_size) {
oop obj_to_scan;
if (queue->pop_local(obj_to_scan)) {
note_pop();

if ((HeapWord *)obj_to_scan < young_old_boundary()) {
if (obj_to_scan->is_objArray() &&
obj_to_scan->is_forwarded() &&
obj_to_scan->forwardee() != obj_to_scan) {
scan_partial_array_and_push_remainder(obj_to_scan);
do {
while (queue->size() > (juint)max_size) {
oop obj_to_scan;
if (queue->pop_local(obj_to_scan)) {
note_pop();
if ((HeapWord *)obj_to_scan < young_old_boundary()) {
if (obj_to_scan->is_objArray() &&
obj_to_scan->is_forwarded() &&
obj_to_scan->forwardee() != obj_to_scan) {
scan_partial_array_and_push_remainder(obj_to_scan);
} else {
// object is in to_space
obj_to_scan->oop_iterate(&_to_space_closure);
}
} else {
// object is in to_space
obj_to_scan->oop_iterate(&_to_space_closure);
// object is in old generation
obj_to_scan->oop_iterate(&_old_gen_closure);
}
} else {
// object is in old generation
obj_to_scan->oop_iterate(&_old_gen_closure);
}
}
// For the case of compressed oops, we have a private, non-shared
// overflow stack, so we eagerly drain it so as to more evenly
// distribute load early. Note: this may be good to do in
// general rather than delay for the final stealing phase.
// If applicable, we'll transfer a set of objects over to our
// work queue, allowing them to be stolen and draining our
// private overflow stack.
} while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
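
The restructuring wraps the old trim loop in a do/while: after trimming below max_size, a worker with a private overflow stack refills its work queue and trims again, so overflow work is spread early instead of waiting for the final stealing phase. The control shape, modelled standalone with a deque for the work queue and a vector for the private stack (ParGCTrimOverflow becomes a plain bool; all sizes are illustrative):

#include <cstdio>
#include <deque>
#include <vector>

typedef int Task;

std::deque<Task> work_queue;
std::vector<Task> overflow_stack;  // private, per-worker in the real thing

void process(Task t) { (void)t; /* scan the object */ }

// Move up to a quarter of the queue's free space from overflow to the queue.
bool take_from_overflow(size_t max_elems) {
  size_t quota = (max_elems - work_queue.size()) / 4;
  size_t moved = 0;
  while (moved < quota && !overflow_stack.empty()) {
    work_queue.push_back(overflow_stack.back());
    overflow_stack.pop_back();
    moved++;
  }
  return moved > 0;
}

void trim_queues(size_t max_size, size_t max_elems, bool trim_overflow) {
  do {
    while (work_queue.size() > max_size) {   // the original trim loop
      process(work_queue.back());
      work_queue.pop_back();
    }
  } while (trim_overflow && take_from_overflow(max_elems));  // eager drain
}

int main() {
  for (int i = 0; i < 8; i++) overflow_stack.push_back(i);
  trim_queues(0, 64, true);
  std::printf("overflow left: %zu, queue left: %zu\n",
              overflow_stack.size(), work_queue.size());  // 0, 0
  return 0;
}
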

bool ParScanThreadState::take_from_overflow_stack() {
assert(UseCompressedOops, "Else should not call");
assert(young_gen()->overflow_list() == NULL, "Error");
ObjToScanQueue* queue = work_queue();
GrowableArray<oop>* of_stack = overflow_stack();
uint num_overflow_elems = of_stack->length();
uint num_take_elems = MIN2(MIN2((queue->max_elems() - queue->size())/4,
(juint)ParGCDesiredObjsFromOverflowList),
num_overflow_elems);
// Transfer the most recent num_take_elems from the overflow
// stack to our work queue.
for (size_t i = 0; i != num_take_elems; i++) {
oop cur = of_stack->pop();
oop obj_to_push = cur->forwardee();
assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
if (should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
}
bool ok = queue->push(obj_to_push);
assert(ok, "Should have succeeded");
}
assert(young_gen()->overflow_list() == NULL, "Error");
return num_take_elems > 0; // was something transferred?
}
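
The transfer size above is the minimum of three quantities: a quarter of the queue's free space, the ParGCDesiredObjsFromOverflowList bound, and the stack's current length. With illustrative numbers, a 16384-slot queue holding 100 tasks allows (16384 - 100)/4 = 4071, a desired bound of 20 caps that at 20, and a 7-element stack caps it at 7. A tiny sketch of just that computation:

#include <algorithm>
#include <cstdio>

int main() {
  unsigned max_elems    = 16384;  // queue capacity (illustrative)
  unsigned size         = 100;    // tasks already queued
  unsigned desired      = 20;     // ParGCDesiredObjsFromOverflowList stand-in
  unsigned overflow_len = 7;      // objects on the private overflow stack

  unsigned num_take = std::min(std::min((max_elems - size) / 4, desired),
                               overflow_len);
  std::printf("transfer %u objects\n", num_take);  // 7
  return 0;
}
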

void ParScanThreadState::push_on_overflow_stack(oop p) {
assert(UseCompressedOops, "Else should not call");
overflow_stack()->push(p);
assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
@ -425,8 +472,7 @@ void ParNewGenTask::work(int i) {
ResourceMark rm;
HandleMark hm;
// We would need multiple old-gen queues otherwise.
guarantee(gch->n_gens() == 2,
"Par young collection currently only works with one older gen.");
assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

Generation* old_gen = gch->next_gen(_gen);

@ -1169,36 +1215,75 @@ bool ParNewGeneration::should_simulate_overflow() {
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
// if the object has been forwarded to itself, then we cannot
// use the klass pointer for the linked list. Instead we have
// to allocate an oopDesc in the C-Heap and use that for the linked list.
// XXX This is horribly inefficient when a promotion failure occurs
// and should be fixed. XXX FIX ME !!!
assert(is_in_reserved(from_space_obj), "Should be from this generation");
if (UseCompressedOops) {
// In the case of compressed oops, we use a private, not-shared
// overflow stack.
par_scan_state->push_on_overflow_stack(from_space_obj);
} else {
// if the object has been forwarded to itself, then we cannot
// use the klass pointer for the linked list. Instead we have
// to allocate an oopDesc in the C-Heap and use that for the linked list.
// XXX This is horribly inefficient when a promotion failure occurs
// and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
Atomic::inc_ptr(&_num_par_pushes);
assert(_num_par_pushes > 0, "Tautology");
Atomic::inc_ptr(&_num_par_pushes);
assert(_num_par_pushes > 0, "Tautology");
#endif
if (from_space_obj->forwardee() == from_space_obj) {
oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
listhead->forward_to(from_space_obj);
from_space_obj = listhead;
}
oop observed_overflow_list = _overflow_list;
oop cur_overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
} else {
from_space_obj->set_klass_to_list_ptr(NULL);
if (from_space_obj->forwardee() == from_space_obj) {
oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
listhead->forward_to(from_space_obj);
from_space_obj = listhead;
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
oop observed_overflow_list = _overflow_list;
oop cur_overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
} else {
from_space_obj->set_klass_to_list_ptr(NULL);
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}
}
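
The else-branch above is a CAS push onto a lock-free list, where BUSY marks a list momentarily claimed by a taker and must never be linked through. The same protocol modelled with std::atomic in place of Atomic::cmpxchg_ptr and an explicit next field in place of the reused klass word; this is a sketch of the pattern, not the HotSpot code:

#include <atomic>
#include <cstdio>

struct Node { Node* next; int payload; };

Node BUSY_NODE;                        // sentinel: a taker has claimed the list
Node* const BUSY = &BUSY_NODE;
std::atomic<Node*> overflow_list(nullptr);

void push_on_overflow_list(Node* n) {
  Node* observed = overflow_list.load();
  Node* cur;
  do {
    cur = observed;
    // Like set_klass_to_list_ptr: never link through the BUSY marker.
    n->next = (cur != BUSY) ? cur : nullptr;
    // On failure, compare_exchange reloads 'observed', mirroring the
    // cmpxchg_ptr retry loop in the hunk above.
  } while (!overflow_list.compare_exchange_weak(observed, n));
}

int main() {
  Node a = {nullptr, 1}, b = {nullptr, 2};
  push_on_overflow_list(&a);
  push_on_overflow_list(&b);
  for (Node* p = overflow_list.load(); p != nullptr && p != BUSY; p = p->next)
    std::printf("%d ", p->payload);    // prints "2 1"
  std::printf("\n");
  return 0;
}
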

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
bool res;

if (UseCompressedOops) {
res = par_scan_state->take_from_overflow_stack();
} else {
res = take_from_overflow_list_work(par_scan_state);
}
return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
@ -1213,14 +1298,13 @@ void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadSt
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool
ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
ObjToScanQueue* work_q = par_scan_state->work_queue();
assert(work_q->size() == 0, "Should first empty local work queue");
// How many to take?
size_t objsFromOverflow = MIN2((size_t)work_q->max_elems()/4,
size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);

assert(par_scan_state->overflow_stack() == NULL, "Error");
if (_overflow_list == NULL) return false;

// Otherwise, there was something there; try claiming the list.
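
The quota change in this hunk is the point of the fix: the old bound, max_elems/4, ignored how full the queue already was, while the new bound, (max_elems - size)/4, is a quarter of the free space and therefore always fits. A hypothetical worked comparison:

#include <cstdio>

int main() {
  unsigned max_elems = 1024;  // illustrative queue capacity
  unsigned size      = 1000;  // queue is nearly full

  unsigned old_quota = max_elems / 4;           // 256: exceeds the 24 free slots
  unsigned new_quota = (max_elems - size) / 4;  // 6: always within free space

  std::printf("old quota: %u, free slots: %u, new quota: %u\n",
              old_quota, max_elems - size, new_quota);
  return 0;
}
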

@ -55,6 +55,7 @@ class ParScanThreadState {
friend class ParScanThreadStateSet;
private:
ObjToScanQueue *_work_queue;
GrowableArray<oop>* _overflow_stack;

ParGCAllocBuffer _to_space_alloc_buffer;

@ -79,6 +80,9 @@ class ParScanThreadState {
Space* _to_space;
Space* to_space() { return _to_space; }

ParNewGeneration* _young_gen;
ParNewGeneration* young_gen() const { return _young_gen; }

Generation* _old_gen;
Generation* old_gen() { return _old_gen; }

@ -134,6 +138,11 @@ class ParScanThreadState {
// Decrease queue size below "max_size".
void trim_queues(int max_size);

// Private overflow stack usage
GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
bool take_from_overflow_stack();
void push_on_overflow_stack(oop p);

// Is new_obj a candidate for scan_partial_array_and_push_remainder method.
inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

@ -378,13 +387,17 @@ class ParNewGeneration: public DefNewGeneration {
NOT_PRODUCT(int _overflow_counter;)
NOT_PRODUCT(bool should_simulate_overflow();)

// Accessor for overflow list
oop overflow_list() { return _overflow_list; }

// Push the given (from-space) object on the global overflow list.
void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

// If the global overflow list is non-empty, move some tasks from it
// onto "work_q" (which must be empty). No more than 1/4 of the
// max_elems of "work_q" are moved.
// onto "work_q" (which need not be empty). No more than 1/4 of the
// available space on "work_q" is used.
bool take_from_overflow_list(ParScanThreadState* par_scan_state);
bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

// The task queues to be used by parallel GC threads.
ObjToScanQueueSet* task_queues() {

@ -60,7 +60,7 @@ class GCCause : public AllStatic {
_old_generation_too_full_to_scavenge,
_adaptive_size_policy,

_g1_inc_collection_pause, _g1_pop_region_collection_pause,
_g1_inc_collection_pause,

_last_ditch_collection,
_last_gc_cause

@ -1316,6 +1316,9 @@ class CommandLineFlags {
\
product(intx, ParGCArrayScanChunk, 50, \
"Scan a subset and push remainder, if array is bigger than this") \
product(bool, ParGCTrimOverflow, true, \
"Eagerly trim the overflow lists (useful for UseCompressedOops)") \
\
\
notproduct(bool, ParGCWorkQueueOverflowALot, false, \
"Whether we should simulate work queue overflow in ParNew") \

@ -59,7 +59,6 @@
template(G1CollectFull) \
template(G1CollectForAllocation) \
template(G1IncCollectionPause) \
template(G1PopRegionCollectionPause) \
template(EnableBiasedLocking) \
template(RevokeBias) \
template(BulkRevokeBias) \