8073466: Remove buffer retaining functionality and clean up in ParGCAllocBuffer

Reviewed-by: jmasa, kbarrett
Thomas Schatzl 2015-03-03 12:38:42 +01:00
parent 40d3986051
commit 2d22f2780c
5 changed files with 113 additions and 151 deletions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
alloc_buf->retire();
HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
@@ -154,9 +154,7 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
if (buf != NULL) {
add_to_alloc_buffer_waste(buf->words_remaining());
buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
true /* end_of_gc */,
false /* retain */);
buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
}
}
}
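
The threshold in allocate_direct_or_new_plab above keeps PLAB refills to requests that would waste at most ParallelGCBufferWastePct percent of a buffer; larger requests bypass the PLAB and are allocated directly. A minimal standalone sketch of that check, using assumed values (a 4096-word PLAB and a 10% waste budget) rather than anything taken from this change:

#include <cstddef>
#include <iostream>

// Illustrative sketch of the PLAB waste check above; the example values are
// assumptions, not values from this change.
int main() {
  const size_t gclab_word_size          = 4096; // assumed PLAB size in words
  const size_t ParallelGCBufferWastePct = 10;   // assumed waste budget in percent

  for (size_t word_sz : {100, 409, 410, 2048}) {
    // Same comparison as in allocate_direct_or_new_plab: a request is "small"
    // if it is below ParallelGCBufferWastePct percent of the PLAB size, so
    // retiring the current buffer and starting a new one is acceptable waste.
    bool refill_plab = word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct;
    std::cout << word_sz << " words -> "
              << (refill_plab ? "retire PLAB and refill" : "allocate directly")
              << "\n";
  }
  return 0;
}

With these numbers, requests of up to 409 words retire and refill the PLAB, while 410 words and up go straight to the heap.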


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -159,11 +159,11 @@ public:
_retired = false;
}
virtual void retire(bool end_of_gc, bool retain) {
virtual void retire() {
if (_retired) {
return;
}
ParGCAllocBuffer::retire(end_of_gc, retain);
ParGCAllocBuffer::retire();
_retired = true;
}
};
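
The override above only adds a guard so that retire() is idempotent: once a G1 buffer has been retired (or before it has ever been given an underlying buffer), a second call must not fill or account for the same space again. A minimal sketch of that pattern with invented class names:

#include <iostream>

// SketchBuffer/SketchG1Buffer are invented names for illustration only.
class SketchBuffer {
public:
  virtual ~SketchBuffer() {}
  virtual void retire() {
    // Stand-in for filling the unused tail with a dummy object and
    // invalidating the buffer.
    std::cout << "buffer retired\n";
  }
};

class SketchG1Buffer : public SketchBuffer {
  bool _retired;
public:
  SketchG1Buffer() : _retired(false) {}
  // Guard so that a second retire() call is a no-op.
  virtual void retire() {
    if (_retired) {
      return;
    }
    SketchBuffer::retire();
    _retired = true;
  }
};

int main() {
  SketchG1Buffer buf;
  buf.retire(); // prints once
  buf.retire(); // no-op: already retired
  return 0;
}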


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
if (word_sz * 100 <
ParallelGCBufferWastePct * plab->word_sz()) {
// Is small enough; abandon this buffer and start a new one.
plab->retire(false, false);
plab->retire();
size_t buf_size = plab->word_sz();
HeapWord* buf_space = sp->par_allocate(buf_size);
if (buf_space == NULL) {
@@ -463,10 +463,7 @@ void ParScanThreadStateSet::flush()
// Flush stats related to To-space PLAB activity and
// retire the last buffer.
par_scan_state.to_space_alloc_buffer()->
flush_stats_and_retire(_gen.plab_stats(),
true /* end_of_gc */,
false /* retain */);
par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
// Every thread has its own age table. We need to merge
// them all into one.
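
flush_and_retire_stats() is the end-of-GC counterpart of retire(): each worker's to-space buffer pushes its counters into the generation's shared PLABStats and then clears them so nothing is counted twice on the next GC (the real accumulation is atomic). A standalone sketch with invented names and made-up counter values:

#include <cstddef>
#include <iostream>
#include <vector>

// All names and numbers here are invented for illustration.
struct SketchStats {
  size_t allocated, wasted, unused;
  SketchStats() : allocated(0), wasted(0), unused(0) {}
};

struct SketchPlab {
  size_t allocated, wasted, remaining;
  SketchPlab(size_t a, size_t w, size_t r) : allocated(a), wasted(w), remaining(r) {}

  void flush_and_retire_stats(SketchStats* stats) {
    stats->allocated += allocated;   // total handed out to this buffer
    stats->wasted    += wasted;      // lost to dummy fill objects during GC
    stats->unused    += remaining;   // left over in the buffer at end of GC
    // Clear the local counters so a buffer reused across GCs does not
    // inflate the next flush.
    allocated = wasted = remaining = 0;
  }
};

int main() {
  SketchStats stats;
  std::vector<SketchPlab> workers;
  workers.push_back(SketchPlab(4096, 12, 300));
  workers.push_back(SketchPlab(4096, 40, 10));
  for (size_t i = 0; i < workers.size(); i++) {
    workers[i].flush_and_retire_stats(&stats);
  }
  std::cout << "allocated=" << stats.allocated     // 8192
            << " wasted="   << stats.wasted        // 52
            << " unused="   << stats.unused << "\n"; // 310
  return 0;
}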


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,27 +24,30 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
size_t ParGCAllocBuffer::min_size() {
// Make sure that we return something that is larger than AlignmentReserve
return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
}
size_t ParGCAllocBuffer::max_size() {
return ThreadLocalAllocBuffer::max_size();
}
ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
_word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
_end(NULL), _hard_end(NULL),
_retained(false), _retained_filler(),
_allocated(0), _wasted(0)
_end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
{
assert (min_size() > AlignmentReserve, "Inconsistency!");
// arrayOopDesc::header_size depends on command line initialization.
FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
// ArrayOopDesc::header_size depends on command line initialization.
AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
assert(min_size() > AlignmentReserve,
err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
"to be able to contain objects", min_size(), AlignmentReserve));
}
size_t ParGCAllocBuffer::FillerHeaderSize;
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
@@ -52,39 +55,33 @@ size_t ParGCAllocBuffer::FillerHeaderSize;
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
assert(!retain || end_of_gc, "Can only retain at GC end.");
if (_retained) {
// If the buffer had been retained shorten the previous filler object.
assert(_retained_filler.end() <= _top, "INVARIANT");
CollectedHeap::fill_with_object(_retained_filler);
// Wasted space book-keeping, otherwise (normally) done in invalidate()
_wasted += _retained_filler.word_size();
_retained = false;
}
assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
if (_top < _hard_end) {
CollectedHeap::fill_with_object(_top, _hard_end);
if (!retain) {
invalidate();
} else {
// Is there wasted space we'd like to retain for the next GC?
if (pointer_delta(_end, _top) > FillerHeaderSize) {
_retained = true;
_retained_filler = MemRegion(_top, FillerHeaderSize);
_top = _top + FillerHeaderSize;
} else {
invalidate();
}
}
}
}
void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
// Retire the last allocation buffer.
size_t unused = retire_internal();
void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
assert(ResizePLAB, "Wasted work");
// Now flush the statistics.
stats->add_allocated(_allocated);
stats->add_wasted(_wasted);
stats->add_unused(pointer_delta(_end, _top));
stats->add_unused(unused);
// Since we have flushed the stats we need to clear the _allocated and _wasted
// fields in case somebody retains an instance of this over GCs. Not doing so
// will artificially inflate the values in the statistics.
_allocated = 0;
_wasted = 0;
}
void ParGCAllocBuffer::retire() {
_wasted += retire_internal();
}
size_t ParGCAllocBuffer::retire_internal() {
size_t result = 0;
if (_top < _hard_end) {
CollectedHeap::fill_with_object(_top, _hard_end);
result += invalidate();
}
return result;
}
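
The refactoring above splits the old retire(end_of_gc, retain) into three pieces: invalidate() shuts the buffer down and reports how many words were left, retire_internal() plants a dummy object over that tail and invalidates, and the public retire() charges the leftover to _wasted (flush_and_retire_stats instead reports it as unused). A standalone sketch of that flow over a plain array; illustration only, not VM code:

#include <cstddef>
#include <cstdint>
#include <iostream>

class SketchPlab {
  uint64_t* _bottom;
  uint64_t* _top;
  uint64_t* _end;       // one past the last allocatable word
  uint64_t* _hard_end;  // _end plus an alignment reserve (0 in this sketch)
  size_t    _wasted;    // words written over with dummy objects so far
public:
  SketchPlab(uint64_t* buf, size_t word_sz)
    : _bottom(buf), _top(buf), _end(buf + word_sz), _hard_end(buf + word_sz),
      _wasted(0) {}

  uint64_t* allocate(size_t word_sz) {
    if (_top + word_sz > _end) return NULL;
    uint64_t* obj = _top;
    _top += word_sz;
    return obj;
  }

  // Stop all future allocation and contains() hits; report how much was left.
  size_t invalidate() {
    _end = _hard_end;
    size_t remaining = (size_t)(_end - _top);
    _top = _end;
    _bottom = _end;
    return remaining;
  }

  // Fill whatever is left (the real PLAB plants a dummy object there via
  // CollectedHeap::fill_with_object) and invalidate the buffer.
  size_t retire_internal() {
    size_t result = 0;
    if (_top < _hard_end) {
      result += invalidate();
    }
    return result;
  }

  // Retiring mid-GC counts the leftover space as waste.
  void retire() { _wasted += retire_internal(); }

  size_t wasted() const { return _wasted; }
};

int main() {
  uint64_t buf[1024];
  SketchPlab plab(buf, 1024);
  plab.allocate(1000);
  plab.retire();                                   // 24 words left over
  std::cout << plab.wasted() << " words wasted\n"; // prints 24
  return 0;
}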
// Compute desired plab size and latch result for later
@@ -101,44 +98,37 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
err_msg("Inconsistency in PLAB stats: "
"_allocated: "SIZE_FORMAT", "
"_wasted: "SIZE_FORMAT", "
"_unused: "SIZE_FORMAT", "
"_used : "SIZE_FORMAT,
_allocated, _wasted, _unused, _used));
"_unused: "SIZE_FORMAT,
_allocated, _wasted, _unused));
_allocated = 1;
}
double wasted_frac = (double)_unused/(double)_allocated;
size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
TargetPLABWastePct);
double wasted_frac = (double)_unused / (double)_allocated;
size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
if (target_refills == 0) {
target_refills = 1;
}
_used = _allocated - _wasted - _unused;
size_t plab_sz = _used/(target_refills*no_of_gc_workers);
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
size_t used = _allocated - _wasted - _unused;
size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
// Take historical weighted average
_filter.sample(plab_sz);
_filter.sample(recent_plab_sz);
// Clip from above and below, and align to object boundary
plab_sz = MAX2(min_size(), (size_t)_filter.average());
plab_sz = MIN2(max_size(), plab_sz);
plab_sz = align_object_size(plab_sz);
size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
new_plab_sz = MIN2(max_size(), new_plab_sz);
new_plab_sz = align_object_size(new_plab_sz);
// Latch the result
if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
_desired_plab_sz = plab_sz;
// Now clear the accumulators for next round:
// note this needs to be fixed in the case where we
// are retaining across scavenges. FIX ME !!! XXX
_allocated = 0;
_wasted = 0;
_unused = 0;
if (PrintPLAB) {
gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
}
_desired_plab_sz = new_plab_sz;
reset();
}
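
With the renames above the sizing math is: wasted_frac = unused / allocated, target_refills = wasted_frac * TargetSurvivorRatio / TargetPLABWastePct (clamped to at least 1), and recent_plab_sz = (allocated - wasted - unused) / (target_refills * workers), which is then fed through the decaying average and clipped to [min_size(), max_size()]. A worked example with assumed constants and counter values (nothing below is taken from this change):

#include <cstddef>
#include <iostream>

// Worked example of the resizing formula above. The constants
// (TargetSurvivorRatio = 50, TargetPLABWastePct = 10, 8 GC workers) and the
// counter values are assumptions for illustration only.
int main() {
  const double TargetSurvivorRatio = 50.0;
  const double TargetPLABWastePct  = 10.0;
  const size_t workers             = 8;

  size_t allocated = 160000;   // words handed out to PLABs this GC
  size_t wasted    = 2000;     // words lost to dummy fill objects
  size_t unused    = 64000;    // words left in the buffers at end of GC

  double wasted_frac    = (double)unused / (double)allocated;             // 0.4
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) /
                                   TargetPLABWastePct);                   // 2
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used           = allocated - wasted - unused;                    // 94000
  size_t recent_plab_sz = used / (target_refills * workers);              // 5875

  // The real code feeds recent_plab_sz into a decaying average and clips the
  // result to [min_size(), max_size()] before latching _desired_plab_sz.
  std::cout << "recent_plab_sz = " << recent_plab_sz << " words\n";
  return 0;
}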
#ifndef PRODUCT
void ParGCAllocBuffer::print() {
gclog_or_tty->print("parGCAllocBuffer: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
" _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT " _retained: %c"
" _retained_filler: [" PTR_FORMAT "," PTR_FORMAT ")\n",
_bottom, _top, _end, _hard_end,
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
gclog_or_tty->print_cr("parGCAllocBuffer: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
" _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT ")",
p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
}
#endif // !PRODUCT


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,37 +24,43 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
#include "gc_interface/collectedHeap.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
// Forward decl.
// Forward declarations.
class PLABStats;
// A per-thread allocation buffer used during GC.
class ParGCAllocBuffer: public CHeapObj<mtGC> {
protected:
char head[32];
size_t _word_sz; // in HeapWord units
char head[32];
size_t _word_sz; // In HeapWord units
HeapWord* _bottom;
HeapWord* _top;
HeapWord* _end; // last allocatable address + 1
HeapWord* _hard_end; // _end + AlignmentReserve
bool _retained; // whether we hold a _retained_filler
MemRegion _retained_filler;
HeapWord* _end; // Last allocatable address + 1
HeapWord* _hard_end; // _end + AlignmentReserve
// In support of ergonomic sizing of PLAB's
size_t _allocated; // in HeapWord units
size_t _wasted; // in HeapWord units
char tail[32];
static size_t FillerHeaderSize;
char tail[32];
static size_t AlignmentReserve;
// Flush the stats supporting ergonomic sizing of PLAB's
// Should not be called directly
void flush_stats(PLABStats* stats);
// Force future allocations to fail and queries for contains()
// to return false. Returns the amount of unused space in this PLAB.
size_t invalidate() {
_end = _hard_end;
size_t remaining = pointer_delta(_end, _top); // Calculate remaining space.
_top = _end; // Force future allocations to fail.
_bottom = _end; // Force future contains() queries to return false.
return remaining;
}
// Fill in remaining space with a dummy object and invalidate the PLAB. Returns
// the amount of remaining space.
size_t retire_internal();
public:
// Initializes the buffer to be empty, but with the given "word_sz".
@@ -62,14 +68,10 @@ public:
ParGCAllocBuffer(size_t word_sz);
virtual ~ParGCAllocBuffer() {}
static const size_t min_size() {
// Make sure that we return something that is larger than AlignmentReserve
return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
}
static const size_t max_size() {
return ThreadLocalAllocBuffer::max_size();
}
// Minimum PLAB size.
static size_t min_size();
// Maximum PLAB size.
static size_t max_size();
// If an allocation of the given "word_sz" can be satisfied within the
// buffer, do the allocation, returning a pointer to the start of the
@@ -128,62 +130,37 @@
_allocated += word_sz();
}
// Flush the stats supporting ergonomic sizing of PLAB's
// and retire the current buffer.
void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
// We flush the stats first in order to get a reading of
// unused space in the last buffer.
if (ResizePLAB) {
flush_stats(stats);
// Flush allocation statistics into the given PLABStats supporting ergonomic
// sizing of PLAB's and retire the current buffer. To be called at the end of
// GC.
void flush_and_retire_stats(PLABStats* stats);
// Since we have flushed the stats we need to clear
// the _allocated and _wasted fields. Not doing so
// will artificially inflate the values in the stats
// to which we add them.
// The next time we flush these values, we will add
// what we have just flushed in addition to the size
// of the buffers allocated between now and then.
_allocated = 0;
_wasted = 0;
}
// Retire the last allocation buffer.
retire(end_of_gc, retain);
}
// Force future allocations to fail and queries for contains()
// to return false
void invalidate() {
assert(!_retained, "Shouldn't retain an invalidated buffer.");
_end = _hard_end;
_wasted += pointer_delta(_end, _top); // unused space
_top = _end; // force future allocations to fail
_bottom = _end; // force future contains() queries to return false
}
// Fills in the unallocated portion of the buffer with a garbage object.
// If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain"
// is true, attempt to re-use the unused portion in the next GC.
virtual void retire(bool end_of_gc, bool retain);
// Fills in the unallocated portion of the buffer with a garbage object and updates
// statistics. To be called during GC.
virtual void retire();
void print() PRODUCT_RETURN;
};
// PLAB stats book-keeping
// PLAB book-keeping.
class PLABStats VALUE_OBJ_CLASS_SPEC {
size_t _allocated; // total allocated
size_t _allocated; // Total allocated
size_t _wasted; // of which wasted (internal fragmentation)
size_t _unused; // Unused in last buffer
size_t _used; // derived = allocated - wasted - unused
size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
AdaptiveWeightedAverage
_filter; // integrator with decay
_filter; // Integrator with decay
void reset() {
_allocated = 0;
_wasted = 0;
_unused = 0;
}
public:
PLABStats(size_t desired_plab_sz_, unsigned wt) :
_allocated(0),
_wasted(0),
_unused(0),
_used(0),
_desired_plab_sz(desired_plab_sz_),
_filter(wt)
{ }
@@ -200,9 +177,9 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
return _desired_plab_sz;
}
// Updates the current desired PLAB size. Computes the new desired PLAB size,
// updates _desired_plab_sz and clears sensor accumulators.
void adjust_desired_plab_sz(uint no_of_gc_workers);
// filter computation, latches output to
// _desired_plab_sz, clears sensor accumulators
void add_allocated(size_t v) {
Atomic::add_ptr(v, &_allocated);