commit e242f23105

@@ -36,9 +36,6 @@
 #include <sys/ioctl.h>
 #include <netdb.h>
 
-// Defined in the system headers included above.
-#undef rem_size
-
 inline void* os::thread_local_storage_at(int index) {
   return pthread_getspecific((pthread_key_t)index);
 }

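The `#undef` removed above existed because, as its comment notes, `rem_size` is defined as a macro somewhere in the system headers pulled in by the includes. The hunks below rename HotSpot's local `rem_size` to `rem_sz`, which makes the `#undef` unnecessary. A minimal sketch of the collision, with a hypothetical macro value (illustrative, not the actual BSD definition):

    // Hypothetical system header (illustrative; not the real BSD definition):
    #define rem_size 4096

    #include <cstddef>

    void example(std::size_t size, std::size_t new_size) {
      // With the macro in scope, the next line would preprocess to
      // "std::size_t 4096 = size - new_size;" and fail to compile:
      //   std::size_t rem_size = size - new_size;
      // Renaming sidesteps the collision without any #undef:
      std::size_t rem_sz = size - new_size;
      (void)rem_sz;
    }
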
@@ -1904,12 +1904,12 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   assert(size > new_size, "Split from a smaller block?");
   assert(is_aligned(chunk), "alignment problem");
   assert(size == adjustObjectSize(size), "alignment problem");
-  size_t rem_size = size - new_size;
-  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
-  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
+  size_t rem_sz = size - new_size;
+  assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
+  assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
   assert(is_aligned(ffc), "alignment problem");
-  ffc->set_size(rem_size);
+  ffc->set_size(rem_sz);
   ffc->link_next(NULL);
   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.

@@ -1917,18 +1917,18 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   OrderAccess::storestore();
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
-  if (rem_size < SmallForDictionary) {
+  if (rem_sz < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
-    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
+    if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
     assert(!is_par ||
            (SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
-    split(size, rem_size);
-    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
+    split(size, rem_sz);
+    if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
   } else {
     returnChunkToDictionary(ffc);
-    split(size ,rem_size);
+    split(size, rem_sz);
   }
   chunk->set_size(new_size);
   return chunk;

@@ -891,6 +891,10 @@ void ConcurrentMark::clearNextBitmap() {
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+bool ConcurrentMark::nextMarkBitmapIsClear() {
+  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+}
+
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {

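The new predicate uses a standard bitmap idiom: searching for the first marked word in `[_heap_start, _heap_end)` yields the end of the range exactly when no bit is set, so a single comparison against `_heap_end` answers "is the bitmap clear?". A self-contained sketch of the same contract over indices (names hypothetical; the real `getNextMarkedWordAddress` operates on heap addresses):

    #include <cstddef>
    #include <vector>

    // First set bit at or after 'from'; returns 'end' if none is set,
    // mirroring the getNextMarkedWordAddress contract over [from, end).
    std::size_t next_marked_index(const std::vector<bool>& bits,
                                  std::size_t from, std::size_t end) {
      for (std::size_t i = from; i < end; i++) {
        if (bits[i]) return i;
      }
      return end;  // sentinel: nothing marked in the range
    }

    bool is_clear(const std::vector<bool>& bits) {
      // Clear iff the first marked index equals the end of the range.
      return next_marked_index(bits, 0, bits.size()) == bits.size();
    }
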
@@ -3358,7 +3362,8 @@ void ConcurrentMark::print_stats() {
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // Clear all marks to force marking thread to do nothing
+  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
+  // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
 
   // Note we cannot clear the previous marking bitmap here

@@ -736,6 +736,9 @@ public:
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
+  // Return whether the next mark bitmap has no marks set.
+  bool nextMarkBitmapIsClear();
+
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a

@@ -277,9 +277,13 @@ void ConcurrentMarkThread::run() {
 
       // We now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
-      {
+      // We may have aborted just before the remark. Do not bother clearing the
+      // bitmap then, as it has been done during mark abort.
+      if (!cm()->has_aborted()) {
         SuspendibleThreadSetJoiner sts;
         _cm->clearNextBitmap();
+      } else {
+        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
       }
     }
 

@@ -2950,11 +2950,18 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
   }
 }
 
-CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return n_regions() > 0 ? region_at(0) : NULL;
+HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
+  // We're not using an iterator given that it will wrap around when
+  // it reaches the last region and this is not what we want here.
+  for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
+    HeapRegion* hr = region_at(index);
+    if (!hr->isHumongous()) {
+      return hr;
+    }
+  }
+  return NULL;
 }
 
-
 Space* G1CollectedHeap::space_containing(const void* addr) const {
   return heap_region_containing(addr);
 }

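`next_compaction_region()` starts at `from->hrs_index() + 1` and skips humongous regions, returning NULL once no candidate remains; a plain indexed loop is used because, as the comment says, a region iterator would wrap back to region 0. A hedged usage sketch (the helper below is illustrative, not part of this commit):

    // Visit every compaction candidate that follows 'start', relying on the
    // next_compaction_region() contract introduced above.
    void for_each_compaction_region_after(G1CollectedHeap* g1h,
                                          const HeapRegion* start) {
      for (HeapRegion* hr = g1h->next_compaction_region(start);
           hr != NULL;
           hr = g1h->next_compaction_region(hr)) {
        // ... treat 'hr' as the next space to compact into ...
      }
    }
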
@@ -1158,19 +1158,19 @@ public:
   }
 
   // The total number of regions in the heap.
-  uint n_regions() { return _hrs.length(); }
+  uint n_regions() const { return _hrs.length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() { return _hrs.max_length(); }
+  uint max_regions() const { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  uint free_regions() { return _free_list.length(); }
+  uint free_regions() const { return _free_list.length(); }
 
   // The number of regions that are not completely free.
-  uint used_regions() { return n_regions() - free_regions(); }
+  uint used_regions() const { return n_regions() - free_regions(); }
 
   // The number of regions available for "regular" expansion.
-  uint expansion_regions() { return _expansion_regions; }
+  uint expansion_regions() const { return _expansion_regions; }
 
   // Factory method for HeapRegion instances. It will return NULL if
   // the allocation fails.

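The added `const` qualifiers are required, not cosmetic: `next_compaction_region()` above is declared `const`, and a `const` member function may only call other `const` members, so members such as `n_regions()` must be const-correct for the new code to compile. A minimal illustration of the rule (hypothetical class):

    class Heap {
      unsigned _length;
    public:
      Heap() : _length(0) {}
      unsigned length()         { return _length; }  // non-const
      unsigned length_c() const { return _length; }  // const

      unsigned scan() const {
        // return length();  // error: 'this' is const but length() is not
        return length_c();   // OK: const member called on const 'this'
      }
    };
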
@@ -1392,8 +1392,7 @@ public:
   // As above but starting from region r
   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
 
-  // Returns the first (lowest address) compactible space in the heap.
-  virtual CompactibleSpace* first_compactible_space();
+  HeapRegion* next_compaction_region(const HeapRegion* from) const;
 
   // A CollectedHeap will contain some number of spaces. This finds the
   // space containing a given address, or else returns NULL.

@@ -1047,7 +1047,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (during_initial_mark_pause()) {
+  if (last_pause_included_initial_mark) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }

@@ -199,6 +199,23 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
   CompactPoint _cp;
   HeapRegionSetCount _humongous_regions_removed;
 
+  bool is_cp_initialized() const {
+    return _cp.space != NULL;
+  }
+
+  void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+    // If this is the first live region that we came across which we can compact,
+    // initialize the CompactPoint.
+    if (!is_cp_initialized()) {
+      _cp.space = hr;
+      _cp.threshold = hr->initialize_threshold();
+    }
+    hr->prepare_for_compaction(&_cp);
+    // Also clear the part of the card table that will be unused after
+    // compaction.
+    _mrbs->clear(MemRegion(hr->compaction_top(), end));
+  }
+
   void free_humongous_region(HeapRegion* hr) {
     HeapWord* end = hr->end();
     FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

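With this change the closure no longer needs to be told the first compaction target up front: `_cp.space` stays NULL until the iteration encounters the first region it can compact, and `is_cp_initialized()` gates the one-time setup. Reduced to its essentials, the lazy-initialization pattern looks like this (types hypothetical):

    #include <cstddef>

    struct Region { /* ... */ };

    struct PrepareClosure {
      Region* _space;  // NULL until the first usable region is seen

      PrepareClosure() : _space(NULL) {}

      bool initialized() const { return _space != NULL; }

      void visit(Region* r) {
        if (!initialized()) {
          _space = r;  // first non-skipped region becomes the target
        }
        // ... prepare 'r' for compaction toward '_space' ...
      }
    };
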
@@ -210,18 +227,15 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
     _humongous_regions_removed.increment(1u, hr->capacity());
 
     _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
+    prepare_for_compaction(hr, end);
     dummy_free_list.remove_all();
   }
 
 public:
-  G1PrepareCompactClosure(CompactibleSpace* cs)
+  G1PrepareCompactClosure()
   : _g1h(G1CollectedHeap::heap()),
     _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL, cs, cs->initialize_threshold()),
+    _cp(NULL),
     _humongous_regions_removed() { }
 
   void update_sets() {

@@ -244,10 +258,7 @@ public:
         assert(hr->continuesHumongous(), "Invalid humongous.");
       }
     } else {
-      hr->prepare_for_compaction(&_cp);
-      // Also clear the part of the card table that will be unused after
-      // compaction.
-      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
+      prepare_for_compaction(hr, hr->end());
     }
     return false;
   }

@@ -265,14 +276,7 @@ void G1MarkSweep::mark_sweep_phase2() {
   GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");
 
-  // find the first region
-  HeapRegion* r = g1h->region_at(0);
-  CompactibleSpace* sp = r;
-  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
-    sp = r->next_compaction_space();
-  }
-
-  G1PrepareCompactClosure blk(sp);
+  G1PrepareCompactClosure blk;
   g1h->heap_region_iterate(&blk);
   blk.update_sets();
 }

@@ -381,18 +381,7 @@ HeapRegion::HeapRegion(uint hrs_index,
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint index = hrs_index() + 1;
-  while (index < g1h->n_regions()) {
-    HeapRegion* hr = g1h->region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-    index += 1;
-  }
-  return NULL;
+  return G1CollectedHeap::heap()->next_compaction_region(this);
 }
 
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,

@@ -119,7 +119,7 @@ protected:
 public:
   const char* name() { return _name; }
 
-  uint length() { return _count.length(); }
+  uint length() const { return _count.length(); }
 
   bool is_empty() { return _count.length() == 0; }
 

@@ -1088,7 +1088,7 @@ void GenCollectedHeap::prepare_for_compaction() {
   guarantee(_n_gens = 2, "Wrong number of generations");
   Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(old_gen, NULL, NULL);
+  CompactPoint cp(old_gen);
   old_gen->prepare_for_compaction(&cp);
   Generation* young_gen = _gens[0];
   young_gen->prepare_for_compaction(&cp);

@@ -330,9 +330,9 @@ public:
   Generation* gen;
   CompactibleSpace* space;
   HeapWord* threshold;
-  CompactPoint(Generation* _gen, CompactibleSpace* _space,
-               HeapWord* _threshold) :
-    gen(_gen), space(_space), threshold(_threshold) {}
+
+  CompactPoint(Generation* _gen) :
+    gen(_gen), space(NULL), threshold(0) {}
 };
 
 

@@ -1538,8 +1538,10 @@ void Arguments::set_conservative_max_heap_alignment() {
     heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
   }
 #endif // INCLUDE_ALL_GCS
-  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
-                                          CollectorPolicy::compute_heap_alignment());
+  _conservative_max_heap_alignment = MAX4(heap_alignment,
+                                          (size_t)os::vm_allocation_granularity(),
+                                          os::max_page_size(),
+                                          CollectorPolicy::compute_heap_alignment());
 }
 
 void Arguments::set_ergonomics_flags() {

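`MAX3` cannot take the newly added `os::vm_allocation_granularity()` term, hence the switch to `MAX4`; the `(size_t)` cast keeps all four arguments the same type so the template argument deduces consistently. HotSpot's MAXn helpers reduce to nested two-argument maxima, along these lines (simplified sketch, not the exact globalDefinitions.hpp text):

    template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }
    template <class T> T MAX3(T a, T b, T c) { return MAX2(MAX2(a, b), c); }
    template <class T> T MAX4(T a, T b, T c, T d) { return MAX2(MAX3(a, b, c), d); }

    // Example: the conservative alignment becomes the largest input, e.g.
    // MAX4(2u*1024*1024, 64u*1024, 4u*1024, 8u*1024*1024) == 8u*1024*1024.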