8079556: BACKOUT - Determining the desired PLAB size adjusts to the number of threads at the wrong place

Reviewed-by: jwilhelm, brutisso
Thomas Schatzl 2015-05-07 10:32:42 +02:00
parent 19e80a1f8f
commit 00e2ae0942
9 changed files with 29 additions and 38 deletions

View File

@@ -83,7 +83,7 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info)
&_retained_old_gc_alloc_region);
}
-void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
AllocationContext_t context = AllocationContext::current();
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
old_gc_alloc_region(context)->count());
@@ -99,8 +99,8 @@ void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_inf
}
if (ResizePLAB) {
-_g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
-_g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
+_g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
+_g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
}
}

View File

@@ -53,7 +53,7 @@ public:
virtual void release_mutator_alloc_region() = 0;
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
-virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
@@ -114,7 +114,7 @@ public:
virtual void release_mutator_alloc_region();
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
+virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
virtual bool is_retained_old_region(HeapRegion* hr) {

View File

@@ -5439,7 +5439,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
phase_times->record_string_dedup_fixup_time(fixup_time_ms);
}
-_allocator->release_gc_alloc_regions(evacuation_info);
+_allocator->release_gc_alloc_regions(n_workers, evacuation_info);
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
// Reset and re-enable the hot card cache.

View File

@@ -276,7 +276,7 @@ private:
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
-void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
+void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.

View File

@@ -48,7 +48,7 @@ PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
}
size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
-size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
+size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar paths as oops, but should
// never be in a humongous region

View File

@@ -1033,7 +1033,7 @@ void ParNewGeneration::collect(bool full,
to()->set_concurrent_iteration_safe_limit(to()->top());
if (ResizePLAB) {
-plab_stats()->adjust_desired_plab_sz();
+plab_stats()->adjust_desired_plab_sz(n_workers);
}
if (PrintGC && !PrintGCDetails) {
@@ -1071,10 +1071,6 @@ void ParNewGeneration::collect(bool full,
_gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
-size_t ParNewGeneration::desired_plab_sz() {
-return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
-}
static int sum;
void ParNewGeneration::waste_some_time() {
for (int i = 0; i < 100; i++) {

View File

@@ -411,7 +411,9 @@ class ParNewGeneration: public DefNewGeneration {
return &_plab_stats;
}
-size_t desired_plab_sz();
+size_t desired_plab_sz() {
+return _plab_stats.desired_plab_sz();
+}
const ParNewTracer* gc_tracer() const {
return &_gc_tracer;

View File

@@ -109,17 +109,10 @@ void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
}
}
-// Calculates plab size for current number of gc worker threads.
-size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
-assert(no_of_gc_workers > 0, "Number of GC workers should be larger than zero");
-return align_object_size(_desired_net_plab_sz / MAX2(no_of_gc_workers, 1U));
-}
-// Compute desired plab size for one gc worker thread and latch result for later
+// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz() {
+void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
assert(ResizePLAB, "Not set");
assert(is_object_aligned(max_size()) && min_size() <= max_size(),
@@ -142,8 +135,7 @@ void PLABStats::adjust_desired_plab_sz() {
target_refills = 1;
}
size_t used = _allocated - _wasted - _unused;
-// Assumed to have 1 gc worker thread
-size_t recent_plab_sz = used / target_refills;
+size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
// Take historical weighted average
_filter.sample(recent_plab_sz);
// Clip from above and below, and align to object boundary
@@ -154,7 +146,7 @@ void PLABStats::adjust_desired_plab_sz() {
if (PrintPLAB) {
gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
}
-_desired_net_plab_sz = new_plab_sz;
+_desired_plab_sz = new_plab_sz;
reset();
}
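
For reference, the sizing rule restored by the plab.cpp hunks above can be modeled outside HotSpot as a minimal standalone sketch. The filter weight, clip bounds, alignment, and all names below are illustrative assumptions, not VM code; only the shape matters: the words used in the last GC are divided by (target_refills * worker count), smoothed with a decaying average, clipped, and latched.

// Minimal standalone sketch (assumed parameters, not HotSpot code) of the
// restored rule: used / (target_refills * n_workers), then smooth, clip, align.
#include <algorithm>
#include <cstddef>
#include <cstdio>

struct PlabSizerSketch {
    double weight    = 0.3;     // decay weight of the smoothing filter (assumed)
    double average   = 1024.0;  // weighted average of recent PLAB sizes, in words
    size_t min_words = 64;      // clip bounds, in words (assumed)
    size_t max_words = 32768;

    size_t adjust(size_t used_words, unsigned n_workers, size_t target_refills) {
        target_refills = std::max<size_t>(target_refills, 1);
        n_workers      = std::max(n_workers, 1u);
        size_t recent_plab_sz = used_words / (target_refills * n_workers);
        // Exponentially weighted average, standing in for AdaptiveWeightedAverage.
        average = (1.0 - weight) * average + weight * static_cast<double>(recent_plab_sz);
        size_t new_sz = std::clamp(static_cast<size_t>(average), min_words, max_words);
        return new_sz & ~static_cast<size_t>(7);  // align down to an 8-word boundary
    }
};

int main() {
    PlabSizerSketch sizer;
    // e.g. 4 workers together consumed 400,000 words, aiming for roughly 50 refills each
    std::printf("desired PLAB size: %zu words\n", sizer.adjust(400000, 4, 50));
    return 0;
}

With 4 workers the recent sample is 400000 / (50 * 4) = 2000 words; the backed-out change instead applied the division by the worker count inside desired_plab_sz(), which this commit reverts.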

View File

@@ -150,13 +150,13 @@ public:
// PLAB book-keeping.
class PLABStats VALUE_OBJ_CLASS_SPEC {
-size_t _allocated; // Total allocated
-size_t _wasted; // of which wasted (internal fragmentation)
-size_t _undo_wasted; // of which wasted on undo (is not used for calculation of PLAB size)
-size_t _unused; // Unused in last buffer
-size_t _desired_net_plab_sz; // Output of filter (below), suitably trimmed and quantized
+size_t _allocated; // Total allocated
+size_t _wasted; // of which wasted (internal fragmentation)
+size_t _undo_wasted; // of which wasted on undo (is not used for calculation of PLAB size)
+size_t _unused; // Unused in last buffer
+size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
AdaptiveWeightedAverage
-_filter; // Integrator with decay
+_filter; // Integrator with decay
void reset() {
_allocated = 0;
@@ -165,12 +165,12 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
_unused = 0;
}
public:
-PLABStats(size_t desired_net_plab_sz_, unsigned wt) :
+PLABStats(size_t desired_plab_sz_, unsigned wt) :
_allocated(0),
_wasted(0),
_undo_wasted(0),
_unused(0),
-_desired_net_plab_sz(desired_net_plab_sz_),
+_desired_plab_sz(desired_plab_sz_),
_filter(wt)
{ }
@@ -182,12 +182,13 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
return PLAB::max_size();
}
-// Calculates plab size for current number of gc worker threads.
-size_t desired_plab_sz(uint no_of_gc_workers);
+size_t desired_plab_sz() {
+return _desired_plab_sz;
+}
-// Updates the current desired PLAB size. Computes the new desired PLAB size with one gc worker thread,
+// Updates the current desired PLAB size. Computes the new desired PLAB size,
// updates _desired_plab_sz and clears sensor accumulators.
-void adjust_desired_plab_sz();
+void adjust_desired_plab_sz(uint no_of_gc_workers);
void add_allocated(size_t v) {
Atomic::add_ptr(v, &_allocated);
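
To make the restored PLABStats interface concrete, here is an illustrative call pattern using stub types only; these are not the HotSpot classes, and the sizing math is elided. After this backout the worker count is passed once per GC when the new size is latched, while desired_plab_sz() is again a plain accessor of the cached value.

// Illustrative stubs, not the HotSpot classes: shows where the worker count
// enters after this backout.
#include <cstddef>
#include <cstdio>

class PLABStatsSketch {
    size_t _desired_plab_sz;  // cached per-thread PLAB size, in words
public:
    explicit PLABStatsSketch(size_t initial_words) : _desired_plab_sz(initial_words) {}

    // Plain accessor: no per-call division by the worker count.
    size_t desired_plab_sz() const { return _desired_plab_sz; }

    // Called once at the end of a GC; this is where n_workers is applied.
    void adjust_desired_plab_sz(unsigned n_workers) {
        // Sizing math elided (see the standalone sketch after the plab.cpp
        // hunks above); it divides the recent allocation by n_workers here.
        (void)n_workers;
    }
};

int main() {
    PLABStatsSketch stats(4096);
    stats.adjust_desired_plab_sz(8);   // end of GC with 8 workers: latch new size once
    std::printf("next PLAB: %zu words\n", stats.desired_plab_sz());  // per-worker use
    return 0;
}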