Jon Masamitsu 2014-07-23 14:06:28 -07:00
commit e242f23105
14 changed files with 72 additions and 62 deletions

View File

@@ -36,9 +36,6 @@
#include <sys/ioctl.h>
#include <netdb.h>
// Defined in the system headers included above.
#undef rem_size
inline void* os::thread_local_storage_at(int index) {
return pthread_getspecific((pthread_key_t)index);
}
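The two deleted lines above were a workaround for a macro named rem_size defined in the system headers included at the top of this file; the rename to rem_sz in the next file makes the workaround unnecessary. A minimal, self-contained sketch of the clash it avoided (the macro body here is made up):

#include <cstddef>

#define rem_size 42 // illustrative stand-in for the system-header macro

int main() {
  // size_t rem_size = 16;  // would not compile: expands to "size_t 42 = 16;"
#undef rem_size             // the deleted workaround: cancel the macro first
  size_t rem_size = 16;     // fine once the macro is gone, or the local is renamed
  return (int)rem_size;
}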

View File

@@ -1904,12 +1904,12 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(size > new_size, "Split from a smaller block?");
assert(is_aligned(chunk), "alignment problem");
assert(size == adjustObjectSize(size), "alignment problem");
size_t rem_size = size - new_size;
assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
size_t rem_sz = size - new_size;
assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
assert(is_aligned(ffc), "alignment problem");
ffc->set_size(rem_size);
ffc->set_size(rem_sz);
ffc->link_next(NULL);
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
// Above must occur before BOT is updated below.
@@ -1917,18 +1917,18 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
OrderAccess::storestore();
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_size < SmallForDictionary) {
if (rem_sz < SmallForDictionary) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
if (is_par) _indexedFreeListParLocks[rem_size]->lock();
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_size);
if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
} else {
returnChunkToDictionary(ffc);
split(size ,rem_size);
split(size, rem_sz);
}
chunk->set_size(new_size);
return chunk;
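For orientation, the split is done in place: the remainder chunk ffc begins new_size words past chunk and keeps rem_sz = size - new_size words, and (per the storestore above) its size and links must be published before the block offset table is updated so parallel GC threads never see a half-initialized chunk. A minimal sketch of the arithmetic, with Word and Chunk as stand-ins for HeapWord and FreeChunk:

#include <cstddef>
#include <cassert>

typedef unsigned long Word;          // stand-in for HeapWord
struct Chunk { size_t size_words; }; // stand-in for FreeChunk

// Split a `size`-word chunk so the original keeps new_size words and the
// remainder becomes a chunk immediately after it.
Chunk* split_chunk(Chunk* chunk, size_t size, size_t new_size) {
  assert(size > new_size);
  size_t rem_sz = size - new_size;
  Chunk* rem = (Chunk*)((Word*)chunk + new_size); // word, not byte, arithmetic
  rem->size_words = rem_sz;
  chunk->size_words = new_size;
  return rem; // small remainders go to indexed free lists, large to the dictionary
}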

View File

@@ -891,6 +891,10 @@ void ConcurrentMark::clearNextBitmap() {
guarantee(!g1h->mark_in_progress(), "invariant");
}
bool ConcurrentMark::nextMarkBitmapIsClear() {
return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
}
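The new check leans on the scan contract of getNextMarkedWordAddress: it returns the first marked address in [_heap_start, _heap_end), or _heap_end when there is none, so equality with the end means the bitmap is empty. A small self-contained sketch of that idiom:

#include <cstddef>

// Return the first marked index in [start, limit), or limit if none is set.
size_t next_marked(const bool* bits, size_t start, size_t limit) {
  for (size_t i = start; i < limit; i++) {
    if (bits[i]) return i;
  }
  return limit;
}

// "Is clear" falls out without a separate flag: the first mark is the limit.
bool is_clear(const bool* bits, size_t limit) {
  return next_marked(bits, 0, limit) == limit;
}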
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
@@ -3358,7 +3362,8 @@ void ConcurrentMark::print_stats() {
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
// Clear all marks to force marking thread to do nothing
// Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
// concurrent bitmap clearing.
_nextMarkBitMap->clearAll();
// Note we cannot clear the previous marking bitmap here

View File

@@ -736,6 +736,9 @@ public:
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
// Return whether the next mark bitmap has no marks set.
bool nextMarkBitmapIsClear();
// These two do the work that needs to be done before and after the
// initial root checkpoint. Since this checkpoint can be done at two
// different points (i.e. an explicit pause or piggy-backed on a

View File

@@ -277,9 +277,13 @@ void ConcurrentMarkThread::run() {
// We now want to allow clearing of the marking bitmap to be
// suspended by a collection pause.
{
// We may have aborted just before the remark. Do not bother clearing the
// bitmap then, as it has been done during mark abort.
if (!cm()->has_aborted()) {
SuspendibleThreadSetJoiner sts;
_cm->clearNextBitmap();
} else {
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
}
}

View File

@@ -2950,11 +2950,18 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
}
}
CompactibleSpace* G1CollectedHeap::first_compactible_space() {
return n_regions() > 0 ? region_at(0) : NULL;
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
// We're not using an iterator given that it will wrap around when
// it reaches the last region and this is not what we want here.
for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
HeapRegion* hr = region_at(index);
if (!hr->isHumongous()) {
return hr;
}
}
return NULL;
}
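Callers can walk the remaining compactible regions by feeding each result back in until NULL. A hypothetical, self-contained model of that contract (Region stands in for HeapRegion):

#include <cstdio>
#include <cstddef>

struct Region { unsigned index; bool humongous; };

// Mirror of the loop above: next non-humongous region after `from`, or NULL.
const Region* next_compaction_region(const Region* table, unsigned len,
                                     const Region* from) {
  for (unsigned i = from->index + 1; i < len; i++) {
    if (!table[i].humongous) return &table[i];
  }
  return NULL;
}

int main() {
  Region heap[4] = { {0, false}, {1, true}, {2, true}, {3, false} };
  const Region* r = next_compaction_region(heap, 4, &heap[0]);
  printf("%d\n", r ? (int)r->index : -1); // prints 3: regions 1 and 2 are skipped
  return 0;
}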
Space* G1CollectedHeap::space_containing(const void* addr) const {
return heap_region_containing(addr);
}

View File

@@ -1158,19 +1158,19 @@ public:
}
// The total number of regions in the heap.
uint n_regions() { return _hrs.length(); }
uint n_regions() const { return _hrs.length(); }
// The max number of regions in the heap.
uint max_regions() { return _hrs.max_length(); }
uint max_regions() const { return _hrs.max_length(); }
// The number of regions that are completely free.
uint free_regions() { return _free_list.length(); }
uint free_regions() const { return _free_list.length(); }
// The number of regions that are not completely free.
uint used_regions() { return n_regions() - free_regions(); }
uint used_regions() const { return n_regions() - free_regions(); }
// The number of regions available for "regular" expansion.
uint expansion_regions() { return _expansion_regions; }
uint expansion_regions() const { return _expansion_regions; }
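These accessors gain const because they are now called from const member functions such as next_compaction_region(const HeapRegion*) const; a const method may only call const members on this. A minimal illustration of the rule:

struct Heap {
  unsigned _len;
  unsigned n_regions() const { return _len; } // callable through a const Heap*

  // Without the const qualifier on n_regions() this would not compile:
  bool is_last(unsigned index) const { return index + 1 == n_regions(); }
};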
// Factory method for HeapRegion instances. It will return NULL if
// the allocation fails.
@@ -1392,8 +1392,7 @@ public:
// As above but starting from region r
void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
// Returns the first (lowest address) compactible space in the heap.
virtual CompactibleSpace* first_compactible_space();
HeapRegion* next_compaction_region(const HeapRegion* from) const;
// A CollectedHeap will contain some number of spaces. This finds the
// space containing a given address, or else returns NULL.

View File

@@ -1047,7 +1047,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
if (last_pause_included_initial_mark) {
new_in_marking_window = true;
new_in_marking_window_im = true;
}

View File

@@ -199,6 +199,23 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
CompactPoint _cp;
HeapRegionSetCount _humongous_regions_removed;
bool is_cp_initialized() const {
return _cp.space != NULL;
}
void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
// If this is the first live region that we came across which we can compact,
// initialize the CompactPoint.
if (!is_cp_initialized()) {
_cp.space = hr;
_cp.threshold = hr->initialize_threshold();
}
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), end));
}
void free_humongous_region(HeapRegion* hr) {
HeapWord* end = hr->end();
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
@@ -210,18 +227,15 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
_humongous_regions_removed.increment(1u, hr->capacity());
_g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), end));
prepare_for_compaction(hr, end);
dummy_free_list.remove_all();
}
public:
G1PrepareCompactClosure(CompactibleSpace* cs)
G1PrepareCompactClosure()
: _g1h(G1CollectedHeap::heap()),
_mrbs(_g1h->g1_barrier_set()),
_cp(NULL, cs, cs->initialize_threshold()),
_cp(NULL),
_humongous_regions_removed() { }
void update_sets() {
@@ -244,10 +258,7 @@ public:
assert(hr->continuesHumongous(), "Invalid humongous.");
}
} else {
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
prepare_for_compaction(hr, hr->end());
}
return false;
}
@@ -265,14 +276,7 @@ void G1MarkSweep::mark_sweep_phase2() {
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("2");
// find the first region
HeapRegion* r = g1h->region_at(0);
CompactibleSpace* sp = r;
if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
sp = r->next_compaction_space();
}
G1PrepareCompactClosure blk(sp);
G1PrepareCompactClosure blk;
g1h->heap_region_iterate(&blk);
blk.update_sets();
}
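The net effect of this file's changes: instead of computing the first compactible space up front (the deleted lines above), the closure starts with an empty CompactPoint and seeds it from the first region it can compact. A minimal sketch of that lazy-initialization pattern (Space and Closure are stand-ins, not the real types):

#include <cstddef>

struct Space { int id; };

struct Closure {
  Space* _cp_space; // NULL until the first compactible element is seen
  Closure() : _cp_space(NULL) {}

  void do_space(Space* s) {
    if (_cp_space == NULL) { // first usable space seeds the compact point
      _cp_space = s;
    }
    // ... plan compaction of s into the area starting at _cp_space ...
  }
};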

View File

@@ -381,18 +381,7 @@ HeapRegion::HeapRegion(uint hrs_index,
}
CompactibleSpace* HeapRegion::next_compaction_space() const {
// We're not using an iterator given that it will wrap around when
// it reaches the last region and this is not what we want here.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
uint index = hrs_index() + 1;
while (index < g1h->n_regions()) {
HeapRegion* hr = g1h->region_at(index);
if (!hr->isHumongous()) {
return hr;
}
index += 1;
}
return NULL;
return G1CollectedHeap::heap()->next_compaction_region(this);
}
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,

View File

@@ -119,7 +119,7 @@ protected:
public:
const char* name() { return _name; }
uint length() { return _count.length(); }
uint length() const { return _count.length(); }
bool is_empty() { return _count.length() == 0; }

View File

@@ -1088,7 +1088,7 @@ void GenCollectedHeap::prepare_for_compaction() {
guarantee(_n_gens == 2, "Wrong number of generations");
Generation* old_gen = _gens[1];
// Start by compacting into same gen.
CompactPoint cp(old_gen, NULL, NULL);
CompactPoint cp(old_gen);
old_gen->prepare_for_compaction(&cp);
Generation* young_gen = _gens[0];
young_gen->prepare_for_compaction(&cp);

View File

@@ -330,9 +330,9 @@ public:
Generation* gen;
CompactibleSpace* space;
HeapWord* threshold;
CompactPoint(Generation* _gen, CompactibleSpace* _space,
HeapWord* _threshold) :
gen(_gen), space(_space), threshold(_threshold) {}
CompactPoint(Generation* _gen) :
gen(_gen), space(NULL), threshold(0) {}
};

View File

@@ -1538,8 +1538,10 @@ void Arguments::set_conservative_max_heap_alignment() {
heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
}
#endif // INCLUDE_ALL_GCS
_conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
CollectorPolicy::compute_heap_alignment());
_conservative_max_heap_alignment = MAX4(heap_alignment,
(size_t)os::vm_allocation_granularity(),
os::max_page_size(),
CollectorPolicy::compute_heap_alignment());
}
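MAX4 here folds four candidates pairwise. Assuming HotSpot's usual nested-max helpers (the real definitions live in globalDefinitions.hpp; these are illustrative stand-ins), the computation looks like:

#include <cstdio>
#include <cstddef>

template <class T> static T max2(T a, T b) { return a > b ? a : b; }
template <class T> static T max3(T a, T b, T c) { return max2(max2(a, b), c); }
template <class T> static T max4(T a, T b, T c, T d) { return max2(max3(a, b, c), d); }

int main() {
  // Illustrative values: 2M heap alignment, 64K allocation granularity,
  // 4K max page size, 1M policy alignment -> the 2M candidate dominates.
  size_t a = max4((size_t)(2u << 20), (size_t)(64u << 10),
                  (size_t)(4u << 10), (size_t)(1u << 20));
  printf("%zu\n", a); // prints 2097152
  return 0;
}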
void Arguments::set_ergonomics_flags() {