8038423: G1: Decommit memory within heap
Allow G1 to decommit memory of arbitrary regions within the heap and of their associated auxiliary data structures: the card table, the BOT, the hot card cache, and the mark bitmaps.

Reviewed-by: mgerdin, brutisso, jwilhelm
parent 2617d54723
commit 100e51a339
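The mechanism is the same for every auxiliary structure this commit touches: the structure hands its backing storage to a region-to-space mapper and registers a listener, and the mapper notifies it whenever heap regions are committed so it can reset the corresponding slice of its data. The standalone C++ sketch below (not HotSpot code; every name in it is illustrative) shows the shape of the G1RegionToSpaceMapper / G1MappingChangedListener contract that the hunks that follow wire into the card table, BOT, hot card cache, and mark bitmaps:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct MappingChangedListener {
      virtual ~MappingChangedListener() {}
      // Called after the pages backing regions
      // [start_idx, start_idx + num_regions) have been committed.
      virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
    };

    class RegionToSpaceMapper {
      MappingChangedListener* _listener = nullptr;
     public:
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(unsigned start_idx, size_t num_regions) {
        // ... commit the backing pages here ...
        if (_listener != nullptr) _listener->on_commit(start_idx, num_regions);
      }
    };

    // An auxiliary table with one byte per region, wiped on (re)commit.
    class PerRegionTable : public MappingChangedListener {
      std::vector<unsigned char> _bytes;
     public:
      explicit PerRegionTable(size_t max_regions) : _bytes(max_regions, 0) {}
      void on_commit(unsigned start_idx, size_t num_regions) override {
        // Freshly committed memory may alias a range that was committed,
        // used, and decommitted earlier, so reset our view of it.
        std::memset(&_bytes[start_idx], 0, num_regions);
      }
    };

Decommit is the payoff: once every auxiliary structure can be re-initialized lazily on commit, G1 no longer has to keep their backing memory committed for heap ranges that are currently unused.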
@@ -81,8 +81,8 @@ void ConcurrentG1Refine::reset_threshold_step() {
   }
 }
 
-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }
 
 void ConcurrentG1Refine::stop() {
@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
 
@@ -74,7 +75,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();
 
-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
   void reinitialize_threads();
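A note on the call site: this section does not show where init() gains its new argument, but given the rest of the commit the shape is presumably that the heap's startup code creates the mapper for the card-count backing storage and hands it down. A hypothetical sketch (both names below are assumptions made for illustration, not APIs shown in this diff):

    // Hypothetical call-site shape; create_card_counts_mapper() is invented
    // here and does not appear in this section of the diff.
    // G1RegionToSpaceMapper* card_counts_storage = create_card_counts_mapper(heap_rs);
    // _cg1r->init(card_counts_storage);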
@@ -36,6 +36,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
@@ -99,12 +100,12 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 }
 
 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize == heap_rs.word_size();
 }
 #endif
 
@@ -112,33 +113,73 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
   _bm.print_on_error(st, prefix);
 }
 
-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
-    return false;
-  }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
+    return false;
+  }
+};
+
 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
 
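The ClearBitmapHRClosure added above replaces a single whole-bitmap clear with a per-region, chunked clear so the concurrent thread can yield to safepoints between chunks. In isolation the control flow is roughly the following standalone sketch (not HotSpot code; the 1 MB chunk size matches the M / HeapWordSize chunking in the patch, while the yield hook is illustrative):

    #include <algorithm>
    #include <cstddef>

    // Clears len bytes starting at bottom, one chunk at a time, offering to
    // yield between chunks. Returns true if the caller should abort iteration.
    bool clear_region_chunked(unsigned char* bottom, size_t len, bool may_yield,
                              bool (*yield_and_check_aborted)()) {
      const size_t chunk = 1024 * 1024;  // ~1 MB per chunk, as in the patch
      for (size_t done = 0; done < len; done += chunk) {
        size_t n = std::min(chunk, len - done);
        std::fill(bottom + done, bottom + done + n, 0);  // stands in for clearRange()
        if (may_yield && yield_and_check_aborted()) {
          return true;  // marking was aborted while we yielded
        }
      }
      return false;
    }

clearAll() reuses the same closure with may_yield = false, which is why it can demand cl.complete() afterwards: with yielding disabled the iteration can never abort early.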
@@ -483,10 +524,10 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
   return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -495,7 +536,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),
 
@@ -545,14 +586,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }
 
-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
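Both mark bitmaps now cover the whole reserved heap rather than a privately reserved space, with their backing storage sized by CMBitMap::compute_size() shown earlier. As a quick sanity check of that arithmetic, here is a sketch under the assumption of 8-byte minimum object alignment and 8-bit bytes (the usual values for a 64-bit build; they are assumptions here, not read from this diff):

    #include <cstdio>

    int main() {
      // Mirrors CMBitMap::mark_distance()/compute_size(): one bitmap bit per
      // MinObjAlignmentInBytes of heap, so one bitmap byte per 64 heap bytes.
      const unsigned long long min_obj_alignment_bytes = 8;   // assumed
      const unsigned long long bits_per_byte = 8;
      const unsigned long long mark_distance = min_obj_alignment_bytes * bits_per_byte;

      const unsigned long long heap_size = 4ULL << 30;              // 4 GB heap
      const unsigned long long bitmap_bytes = heap_size / mark_distance;  // 64 MB
      std::printf("each bitmap: %llu bytes for a %llu-byte heap\n",
                  bitmap_bytes, heap_size);
      return 0;
    }

With two bitmaps (prev and next) that is 1/32 of the heap in bitmap storage, which is exactly the memory the mapper can now commit and decommit region by region instead of keeping it resident up front.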
@@ -563,8 +598,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   }
 
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -724,37 +759,17 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   clear_all_count_data();
 
   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }
 
-void ConcurrentMark::update_heap_boundaries(MemRegion bounds, bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  assert(bounds.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = bounds.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end = reserved.end();
 
   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
@@ -826,7 +841,6 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_heap_boundaries(_g1h->g1_committed(), true);
   }
 }
 
@@ -845,7 +859,6 @@ ConcurrentMark::~ConcurrentMark() {
 
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
 
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -857,41 +870,36 @@ void ConcurrentMark::clearNextBitmap() {
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");
 
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start = _nextMarkBitMap->startWord();
-  HeapWord* end = _nextMarkBitMap->endWord();
-  HeapWord* cur = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
-  }
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
 
-  // Clear the liveness counting data
-  clear_all_count_data();
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
+  }
 
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+  }
+};
+
 bool ConcurrentMark::nextMarkBitmapIsClear() {
-  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
 }
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
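Both clearNextBitmap() and nextMarkBitmapIsClear() above lean on the HeapRegionClosure convention that doHeapRegion() returns true to abort the iteration, and that complete() afterwards reports whether every region was visited. A standalone sketch of that convention (illustrative names, not the HotSpot classes):

    #include <vector>

    struct Region { bool has_marks; };

    struct RegionClosure {
      bool _complete = true;
      virtual ~RegionClosure() {}
      virtual bool do_region(Region* r) = 0;  // return true to abort iteration
      bool complete() const { return _complete; }
    };

    void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
      for (Region& r : heap) {
        if (cl->do_region(&r)) {  // closure asked to stop early
          cl->_complete = false;
          return;
        }
      }
    }

    // "Is the bitmap clear?" becomes "did we visit every region without
    // finding a mark?", i.e. whether the iteration ran to completion.
    struct CheckClearClosure : RegionClosure {
      bool do_region(Region* r) override { return r->has_marks; }
    };

Iterating per region instead of over [_heap_start, _heap_end) matters here because uncommitted regions inside the heap must be skipped: their bitmap storage may not be mapped at all.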
@@ -2192,8 +2200,8 @@ void ConcurrentMark::completeCleanup() {
                            _cleanup_list.length());
   }
 
-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
@@ -2979,22 +2987,25 @@ ConcurrentMark::claim_region(uint worker_id) {
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce and correctness issues.
     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
-    HeapWord* bottom = curr_region->bottom();
-    HeapWord* end = curr_region->end();
-    HeapWord* limit = curr_region->next_top_at_mark_start();
 
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
-                             "["PTR_FORMAT", "PTR_FORMAT"), "
-                             "limit = "PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+    // Above heap_region_containing_raw may return NULL as we always scan claim
+    // until the end of the heap. In this case, just jump to the next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord* bottom = curr_region->bottom();
+      HeapWord* limit = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
+                               "["PTR_FORMAT", "PTR_FORMAT"), "
+                               "limit = "PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }
 
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
@@ -3025,10 +3036,17 @@ ConcurrentMark::claim_region(uint worker_id) {
   } else {
     assert(_finger > finger, "the finger should have moved forward");
     if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                             "global finger = "PTR_FORMAT", "
-                             "our finger = "PTR_FORMAT,
-                             worker_id, p2i(_finger), p2i(finger));
+      if (curr_region == NULL) {
+        gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                               "global finger = "PTR_FORMAT", "
+                               "our finger = "PTR_FORMAT,
+                               worker_id, p2i(_finger), p2i(finger));
+      } else {
+        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                               "global finger = "PTR_FORMAT", "
+                               "our finger = "PTR_FORMAT,
+                               worker_id, p2i(_finger), p2i(finger));
+      }
     }
 
     // read it again
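The change to claim_region() above is the marking-side consequence of holes in the heap: heap_region_containing_raw() can now return NULL for an uncommitted region, so the shared finger is still advanced by one region-size step to hop over the hole without claiming anything. A standalone sketch of that claiming step (not HotSpot code; the lookup callback stands in for heap_region_containing_raw()):

    #include <atomic>
    #include <cstddef>

    struct Region { char* bottom; char* end; };

    // 'lookup' may return nullptr for an uncommitted region; 'heap_end'
    // bounds the scan; 'region_bytes' stands in for HeapRegion::GrainBytes.
    Region* claim_region(std::atomic<char*>& finger, char* heap_end,
                         size_t region_bytes, Region* (*lookup)(char*)) {
      for (;;) {
        char* f = finger.load();
        if (f >= heap_end) return nullptr;        // nothing left to claim
        Region* r = lookup(f);
        // Advance to the next region boundary even over an uncommitted hole.
        char* end = (r != nullptr) ? r->end : f + region_bytes;
        if (finger.compare_exchange_strong(f, end) && r != nullptr) {
          return r;                               // we claimed a real region
        }
        // CAS lost, or the slot was a hole: re-read the finger and retry.
      }
    }

The same reasoning explains the relaxed guarantees in the verify_no_cset_oops() hunks that follow: a finger that sits on an uncommitted region legitimately maps to a NULL HeapRegion.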
@@ -3143,8 +3161,10 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
     // happens, heap_region_containing() will return the bottom of the
     // corresponding starts humongous region and the check below will
     // not hold any more.
+    // Since we always iterate over all regions, we might get a NULL HeapRegion
+    // here.
     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
-    guarantee(global_finger == global_hr->bottom(),
+    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
   }
@@ -3157,7 +3177,7 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
   if (task_finger != NULL && task_finger < _heap_end) {
     // See above note on the global finger verification.
     HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
-    guarantee(task_finger == task_hr->bottom() ||
+    guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
               !task_hr->in_collection_set(),
               err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                       p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@@ -4673,7 +4693,6 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
 
@@ -4681,10 +4700,8 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
   _out->cr();
   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("committed")
                  G1PPRL_SUM_ADDR_FORMAT("reserved")
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
@@ -27,10 +27,12 @@
 
 #include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"
 
 class G1CollectedHeap;
+class CMBitMap;
 class CMTask;
 typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@@ -57,7 +59,6 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
   const int _shifter;          // map to char or bit
-  VirtualSpace _virtual_space; // underlying the bit map
   BitMap    _bm;               // the bit map itself
 
 public:
@@ -115,42 +116,41 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // debugging
-  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class CMBitMapMappingChangedListener : public G1MappingChangedListener {
+ private:
+  CMBitMap* _bm;
+ public:
+  CMBitMapMappingChangedListener() : _bm(NULL) {}
+
+  void set_bitmap(CMBitMap* bm) { _bm = bm; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 class CMBitMap : public CMBitMapRO {
+ private:
+  CMBitMapMappingChangedListener _listener;
+
 public:
-  // constructor
-  CMBitMap(int shifter) :
-    CMBitMapRO(shifter) {}
-
-  // Allocates the back store for the marking bitmap
-  bool allocate(ReservedSpace heap_rs);
+  static size_t compute_size(size_t heap_size);
+  // Returns the amount of bytes on the heap between two marks in the bitmap.
+  static size_t mark_distance();
+
+  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
+
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+  inline bool parClear(HeapWord* addr);
 
-  // write marks
-  void mark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.set_bit(heapWordToOffset(addr));
-  }
-  void clear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.clear_bit(heapWordToOffset(addr));
-  }
-  bool parMark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_set_bit(heapWordToOffset(addr));
-  }
-  bool parClear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_clear_bit(heapWordToOffset(addr));
-  }
   void markRange(MemRegion mr);
-  void clearAll();
   void clearRange(MemRegion mr);
 
   // Starting at the bit corresponding to "addr" (inclusive), find the next
@@ -161,6 +161,9 @@ class CMBitMap : public CMBitMapRO {
   // the run. If there is no "1" bit at or after "addr", return an empty
   // MemRegion.
   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
+
+  // Clear the whole mark bitmap.
+  void clearAll();
 };
 
 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -680,7 +683,7 @@ public:
     return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
-  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
+  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();
 
   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -736,7 +739,8 @@ public:
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
-  // Return whether the next mark bitmap has no marks set.
+  // Return whether the next mark bitmap has no marks set. To be used for assertions
+  // only. Will not yield to pause requests.
   bool nextMarkBitmapIsClear();
 
   // These two do the work that needs to be done before and after the
@@ -794,12 +798,6 @@ public:
                             bool verify_thread_buffers,
                             bool verify_fingers) PRODUCT_RETURN;
 
-  // It is called at the end of an evacuation pause during marking so
-  // that CM is notified of where the new end of the heap is. It
-  // doesn't do anything if concurrent_marking_in_progress() is false,
-  // unless the force parameter is true.
-  void update_heap_boundaries(MemRegion bounds, bool force = false);
-
   bool isMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
@@ -268,6 +268,36 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
   return iterate(cl, mr);
 }
 
+#define check_mark(addr)                                                       \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
+         "outside underlying space?");                                         \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT           \
+                 " corresponding to "PTR_FORMAT" (%u)",                        \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
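Note what check_mark adds over the old in-header asserts: besides the range check, it verifies that the address maps to a committed part of the heap (is_in_exact), which is exactly the new failure mode a partially committed bitmap introduces. Being a macro that is #undef'd right after the accessors, it costs nothing in product builds. A standalone sketch of that scoped-debug-macro pattern (illustrative, not the HotSpot macro):

    #include <cassert>
    #include <cstddef>

    // Defined just before the accessors and #undef'd right after, so it can
    // expand to several asserts in debug builds and to nothing under NDEBUG.
    #define CHECK_IN_RANGE(addr, lo, hi) \
      assert((lo) <= (addr) && (addr) < (hi) && "outside underlying space?")

    struct Bitmap {
      unsigned char* bits;
      size_t lo, hi;  // covered range, expressed as indices for simplicity
      void mark(size_t addr) {
        CHECK_IN_RANGE(addr, lo, hi);
        bits[(addr - lo) / 8] |= (unsigned char)(1u << ((addr - lo) % 8));
      }
    };

    #undef CHECK_IN_RANGE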
@@ -32,64 +32,37 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+  // retrieve it here since this would cause firing of several asserts. The code
+  // executed after commit of a region already needs to do some re-initialization of
+  // the HeapRegion, so we combine that.
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////
 
-G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
-                                                   size_t init_word_size) :
-  _reserved(reserved), _end(NULL)
-{
-  size_t size = compute_size(reserved.word_size());
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
-  if (!rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
-  if (!_vs.initialize(rs, 0)) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
+G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
+    _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
+
+  _reserved = heap;
+  _end = NULL;
+
+  MemRegion bot_reserved = storage->reserved();
+
+  _offset_array = (u_char*)bot_reserved.start();
+  _end = _reserved.end();
+
+  storage->set_mapping_changed_listener(&_listener);
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
-
-  _offset_array = (u_char*)_vs.low_boundary();
-  resize(init_word_size);
   if (TraceBlockOffsetTable) {
     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
     gclog_or_tty->print_cr(" "
                            " rs.base(): " INTPTR_FORMAT
                            " rs.size(): " INTPTR_FORMAT
                            " rs end(): " INTPTR_FORMAT,
-                           rs.base(), rs.size(), rs.base() + rs.size());
-    gclog_or_tty->print_cr(" "
-                           " _vs.low_boundary(): " INTPTR_FORMAT
-                           " _vs.high_boundary(): " INTPTR_FORMAT,
-                           _vs.low_boundary(),
-                           _vs.high_boundary());
-  }
-}
-
-void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
-  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
-  size_t new_size = compute_size(new_word_size);
-  size_t old_size = _vs.committed_size();
-  size_t delta;
-  char* high = _vs.high();
-  _end = _reserved.start() + new_word_size;
-  if (new_size > old_size) {
-    delta = ReservedSpace::page_align_size_up(new_size - old_size);
-    assert(delta > 0, "just checking");
-    if (!_vs.expand_by(delta)) {
-      // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
-    }
-    assert(_vs.high() == high + delta, "invalid expansion");
-    // Initialization of the contents is left to the
-    // G1BlockOffsetArray that uses it.
-  } else {
-    delta = ReservedSpace::page_align_size_down(old_size - new_size);
-    if (delta == 0) return;
-    _vs.shrink_by(delta);
-    assert(_vs.high() == high - delta, "invalid expansion");
+                           bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
   }
 }
 
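The block offset table follows the same pattern as the bitmaps: its array is a flat byte table over the reserved heap, committed and decommitted by the mapper, so the old resize() path disappears entirely. The indexing arithmetic that index_for_raw() performs is a subtraction and a shift; with LogN = 9 (declared in the header hunk below) each table byte describes one 512-byte card of heap. A small worked example (the heap base address is made up for illustration):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t log_n = 9;                              // from the diff
      const size_t card_bytes = (size_t)1 << log_n;        // 512
      const unsigned long long heap_base = 0x100000000ULL; // illustrative

      unsigned long long addr = heap_base + 5 * card_bytes + 17;
      size_t index = (size_t)((addr - heap_base) >> log_n);            // index_for_raw
      unsigned long long card_start =
          heap_base + ((unsigned long long)index << log_n);            // address_for_index_raw

      std::printf("addr %#llx -> BOT slot %zu (card starts at %#llx)\n",
                  addr, index, card_start);                            // slot 5
      return 0;
    }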
@@ -100,18 +73,7 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
 }
 
 void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-  check_index(index_for(right - 1), "right address out of range");
-  assert(left < right, "Heap addresses out of order");
-  size_t num_cards = pointer_delta(right, left) >> LogN_words;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[index_for(left)], offset, num_cards);
-  } else {
-    size_t i = index_for(left);
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  set_offset_array(index_for(left), index_for(right -1), offset);
 }
 
 //////////////////////////////////////////////////////////////////////
@@ -650,6 +612,25 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
   _next_offset_index = 0;
 }
 
+HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  _next_offset_index = _array->index_for_raw(_bottom);
+  _next_offset_index++;
+  _next_offset_threshold =
+    _array->address_for_index_raw(_next_offset_index);
+  return _next_offset_threshold;
+}
+
+void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  size_t bottom_index = _array->index_for_raw(_bottom);
+  assert(_array->address_for_index_raw(bottom_index) == _bottom,
+         "Precondition of call");
+  _array->set_offset_array_raw(bottom_index, 0);
+}
+
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
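The _raw variants above do the same work as initialize_threshold() and zero_bottom_entry() but deliberately skip the check_index availability assertion: they run while a region is being (re)initialized right after commit, when the "does this index lie in a committed part of the heap" check could fire spuriously. The split is the common checked/unchecked accessor pair, sketched standalone here (illustrative names):

    #include <cassert>
    #include <cstddef>

    struct OffsetTable {
      unsigned char* slots;
      size_t committed_slots;

      // Unchecked: for use while the backing memory is still being
      // (re)initialized after a commit, when the check itself would fire.
      void set_raw(size_t i, unsigned char v) { slots[i] = v; }

      // Checked: the normal path funnels through the raw one.
      void set(size_t i, unsigned char v) {
        assert(i < committed_slots && "index out of committed range");
        set_raw(i, v);
      }
    };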
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -106,6 +107,11 @@ public:
   inline HeapWord* block_start_const(const void* addr) const;
 };
 
+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // This implementation of "G1BlockOffsetTable" divides the covered region
 // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
 // for each such subregion indicates how far back one must go to find the
@@ -125,6 +131,7 @@ class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
   friend class VMStructs;
 
 private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   // The reserved region covered by the shared array.
   MemRegion _reserved;
 
@@ -133,16 +140,8 @@ private:
 
   // Array for keeping offsets for retrieving object start fast given an
   // address.
-  VirtualSpace _vs;
   u_char* _offset_array; // byte array keeping backwards offsets
 
-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
-
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
@@ -152,63 +151,33 @@ private:
 
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;
 
   void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
 
-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
+  void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }
 
-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char) pointer_delta(high, low);
-  }
+  inline void set_offset_array(size_t index, u_char offset);
 
-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 
-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
+
+  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
 
   bool is_card_boundary(HeapWord* p) const;
 
+ public:
+
   // Return the number of slots needed for an offset array
   // that covers mem_region_words words.
-  // We always add an extra slot because if an object
-  // ends on a card boundary we put a 0 in the next
-  // offset array slot, so we want that slot always
-  // to be reserved.
-
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
- public:
   enum SomePublicConstants {
     LogN = 9,
     LogN_words = LogN - LogHeapWordSize,
@@ -222,21 +191,21 @@
   // least "init_word_size".) The contents of the initial table are
   // undefined; it is the responsibility of the constituent
   // G1BlockOffsetTable(s) to initialize cards.
-  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
-
-  // Notes a change in the committed size of the region covered by the
-  // table. The "new_word_size" may not be larger than the size of the
-  // reserved region this table covers.
-  void resize(size_t new_word_size);
+  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
 
   void set_bottom(HeapWord* new_bottom);
 
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
+  inline size_t index_for_raw(const void* p) const;
 
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   inline HeapWord* address_for_index(size_t index) const;
+  // Variant of address_for_index that does not check the index for validity.
+  inline HeapWord* address_for_index_raw(size_t index) const {
+    return _reserved.start() + (index << LogN_words);
+  }
 };
 
 // And here is the G1BlockOffsetTable subtype that uses the array.
@@ -476,6 +445,12 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
                           blk_start, blk_end);
   }
 
+  // Variant of zero_bottom_entry that does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
   // Zero out the entry for _bottom (offset will be zero).
   void zero_bottom_entry();
 public:
@@ -486,8 +461,8 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
   HeapWord* initialize_threshold();
 
   void reset_bot() {
-    zero_bottom_entry();
-    initialize_threshold();
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
   }
 
   // Return the next threshold, the point at which the table should be
@@ -47,14 +47,69 @@ G1BlockOffsetTable::block_start_const(const void* addr) const {
   }
 }
 
+#define check_index(index, msg)                                                \
+  assert((index) < (_reserved.word_size() >> LogN_words),                      \
+         err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
+                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
+  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
+         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT            \
+                 " (%u) is not in committed area.",                            \
+                 (index),                                                      \
+                 p2i(address_for_index_raw(index)),                            \
+                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
+
+u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
+  check_index(index, "index out of range");
+  return _offset_array[index];
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
+  check_index(index, "index out of range");
+  set_offset_array_raw(index, offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  size_t offset = pointer_delta(high, low);
+  check_offset(offset, "offset too large");
+  set_offset_array(index, (u_char)offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
+  check_index(right, "right index out of range");
+  assert(left <= right, "indexes out of order");
+  size_t num_cards = right - left + 1;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[left], offset, num_cards);
+  } else {
+    size_t i = left;
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
+void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  check_offset(pointer_delta(high, low), "offset too large");
+  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
+}
+
+// Variant of index_for that does not check the index for validity.
+inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
+  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
+}
+
 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
   char* pc = (char*)p;
   assert(pc >= (char*)_reserved.start() &&
          pc < (char*)_reserved.end(),
          err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
-  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
-  size_t result = delta >> LogN;
+  size_t result = index_for_raw(p);
   check_index(result, "bad index from address");
   return result;
 }
@@ -62,7 +117,7 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
 inline HeapWord*
 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   check_index(index, "index out of range");
-  HeapWord* result = _reserved.start() + (index << LogN_words);
+  HeapWord* result = address_for_index_raw(index);
   assert(result >= _reserved.start() && result < _reserved.end(),
          err_msg("bad address from index result " PTR_FORMAT
                  " _reserved.start() " PTR_FORMAT " _reserved.end() "
@@ -71,6 +126,8 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   return result;
 }
 
+#undef check_index
+
 inline size_t
 G1BlockOffsetArray::block_size(const HeapWord* p) const {
   return gsp()->block_size(p);
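The remaining hunks apply the same treatment to the card count table used by the hot card cache: a listener clears the count bytes for freshly committed regions, and the explicit clamping and committed-max asserts disappear because the mapper now guarantees the table is committed exactly where the heap is. Since one count byte exists per card, committing N regions means wiping N * (region_bytes / card_bytes) consecutive table bytes, as in this sketch (illustrative sizes; 512 bytes is HotSpot's usual card size):

    #include <cstddef>
    #include <cstring>

    void on_commit_clear_counts(unsigned char* counts, unsigned start_region,
                                size_t num_regions, size_t region_bytes) {
      const size_t card_bytes = 512;
      const size_t cards_per_region = region_bytes / card_bytes;
      std::memset(counts + (size_t)start_region * cards_per_region, 0,
                  num_regions * cards_per_region);
    }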
@@ -33,31 +33,26 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _counts->clear_range(mr);
+}
+
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
-           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
-    assert(to_card_num <= _committed_max_card_num,
-           err_msg("to card num out of range: "
-                   "to: "SIZE_FORMAT ", "
-                   "max: "SIZE_FORMAT,
-                   to_card_num, _committed_max_card_num));
-
-    to_card_num = MIN2(_committed_max_card_num, to_card_num);
-
     Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
   }
 }
 
 G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _g1h(g1h), _card_counts(NULL),
-  _reserved_max_card_num(0), _committed_max_card_num(0),
-  _committed_size(0) {}
+  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener.set_cardcounts(this);
+}
 
-void G1CardCounts::initialize() {
+void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   assert(_g1h->max_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
@ -70,70 +65,9 @@ void G1CardCounts::initialize() {
|
|||||||
_ct_bs = _g1h->g1_barrier_set();
|
_ct_bs = _g1h->g1_barrier_set();
|
||||||
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
|
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
|
||||||
|
|
||||||
// Allocate/Reserve the counts table
|
_card_counts = (jubyte*) mapper->reserved().start();
|
||||||
size_t reserved_bytes = _g1h->max_capacity();
|
_reserved_max_card_num = mapper->reserved().byte_size();
|
||||||
_reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
|
mapper->set_mapping_changed_listener(&_listener);
|
||||||
|
|
||||||
size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
|
|
||||||
ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
|
|
||||||
if (!rs.is_reserved()) {
|
|
||||||
warning("Could not reserve enough space for the card counts table");
|
|
||||||
guarantee(!has_reserved_count_table(), "should be NULL");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
|
|
||||||
|
|
||||||
_card_counts_storage.initialize(rs, 0);
|
|
||||||
_card_counts = (jubyte*) _card_counts_storage.low();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void G1CardCounts::resize(size_t heap_capacity) {
|
|
||||||
// Expand the card counts table to handle a heap with the given capacity.
|
|
||||||
|
|
||||||
if (!has_reserved_count_table()) {
|
|
||||||
// Don't expand if we failed to reserve the card counts table.
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(_committed_size ==
|
|
||||||
ReservedSpace::allocation_align_size_up(_committed_size),
|
|
||||||
err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
|
|
||||||
|
|
||||||
// Verify that the committed space for the card counts matches our
|
|
||||||
// committed max card num. Note for some allocation alignments, the
|
|
||||||
// amount of space actually committed for the counts table will be able
|
|
||||||
// to span more cards than the number spanned by the maximum heap.
|
|
||||||
size_t prev_committed_size = _committed_size;
|
|
||||||
size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
|
|
||||||
|
|
||||||
assert(prev_committed_card_num == _committed_max_card_num,
|
|
||||||
err_msg("Card mismatch: "
|
|
||||||
"prev: " SIZE_FORMAT ", "
|
|
||||||
"committed: "SIZE_FORMAT", "
|
|
||||||
"reserved: "SIZE_FORMAT,
|
|
||||||
prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
|
|
||||||
|
|
||||||
size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
|
|
||||||
size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
|
|
||||||
size_t new_committed_card_num = committed_to_card_num(new_committed_size);
|
|
||||||
|
|
||||||
if (_committed_max_card_num < new_committed_card_num) {
|
|
||||||
// we need to expand the backing store for the card counts
|
|
||||||
size_t expand_size = new_committed_size - prev_committed_size;
|
|
||||||
|
|
||||||
if (!_card_counts_storage.expand_by(expand_size)) {
|
|
||||||
warning("Card counts table backing store commit failure");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
assert(_card_counts_storage.committed_size() == new_committed_size,
|
|
||||||
"expansion commit failure");
|
|
||||||
|
|
||||||
_committed_size = new_committed_size;
|
|
||||||
_committed_max_card_num = new_committed_card_num;
|
|
||||||
|
|
||||||
clear_range(prev_committed_card_num, _committed_max_card_num);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -149,12 +83,13 @@ uint G1CardCounts::add_card_count(jbyte* card_ptr) {
|
|||||||
uint count = 0;
|
uint count = 0;
|
||||||
if (has_count_table()) {
|
if (has_count_table()) {
|
||||||
size_t card_num = ptr_2_card_num(card_ptr);
|
size_t card_num = ptr_2_card_num(card_ptr);
|
||||||
if (card_num < _committed_max_card_num) {
|
assert(card_num < _reserved_max_card_num,
|
||||||
count = (uint) _card_counts[card_num];
|
err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
|
||||||
if (count < G1ConcRSHotCardLimit) {
|
card_num, _reserved_max_card_num));
|
||||||
_card_counts[card_num] =
|
count = (uint) _card_counts[card_num];
|
||||||
(jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
|
if (count < G1ConcRSHotCardLimit) {
|
||||||
}
|
_card_counts[card_num] =
|
||||||
|
(jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return count;
|
return count;
|
||||||
@ -165,31 +100,23 @@ bool G1CardCounts::is_hot(uint count) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void G1CardCounts::clear_region(HeapRegion* hr) {
|
void G1CardCounts::clear_region(HeapRegion* hr) {
|
||||||
assert(!hr->isHumongous(), "Should have been cleared");
|
MemRegion mr(hr->bottom(), hr->end());
|
||||||
|
clear_range(mr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1CardCounts::clear_range(MemRegion mr) {
|
||||||
if (has_count_table()) {
|
if (has_count_table()) {
|
||||||
HeapWord* bottom = hr->bottom();
|
const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
|
||||||
|
// We use the last address in the range as the range could represent the
|
||||||
// We use the last address in hr as hr could be the
|
// last region in the heap. In which case trying to find the card will be an
|
||||||
// last region in the heap. In which case trying to find
|
// OOB access to the card table.
|
||||||
// the card for hr->end() will be an OOB access to the
|
const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
|
||||||
// card table.
|
|
||||||
HeapWord* last = hr->end() - 1;
|
|
||||||
assert(_g1h->g1_committed().contains(last),
|
|
||||||
err_msg("last not in committed: "
|
|
||||||
"last: " PTR_FORMAT ", "
|
|
||||||
"committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
|
||||||
last,
|
|
||||||
_g1h->g1_committed().start(),
|
|
||||||
_g1h->g1_committed().end()));
|
|
||||||
|
|
||||||
const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
|
|
||||||
const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
|
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
|
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
|
||||||
assert(start_addr == hr->bottom(), "alignment");
|
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
|
||||||
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
|
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
|
||||||
assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
|
assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
|
||||||
#endif // ASSERT
|
#endif // ASSERT
|
||||||
|
|
||||||
// Clear the counts for the (exclusive) card range.
|
// Clear the counts for the (exclusive) card range.
|
||||||
@ -199,14 +126,22 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class G1CardCountsClearClosure : public HeapRegionClosure {
|
||||||
|
private:
|
||||||
|
G1CardCounts* _card_counts;
|
||||||
|
public:
|
||||||
|
G1CardCountsClearClosure(G1CardCounts* card_counts) :
|
||||||
|
HeapRegionClosure(), _card_counts(card_counts) { }
|
||||||
|
|
||||||
|
|
||||||
|
virtual bool doHeapRegion(HeapRegion* r) {
|
||||||
|
_card_counts->clear_region(r);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
void G1CardCounts::clear_all() {
|
void G1CardCounts::clear_all() {
|
||||||
assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
|
assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
|
||||||
clear_range((size_t)0, _committed_max_card_num);
|
G1CardCountsClearClosure cl(this);
|
||||||
|
_g1h->heap_region_iterate(&cl);
|
||||||
}
|
}
|
||||||
|
|
||||||
G1CardCounts::~G1CardCounts() {
|
|
||||||
if (has_reserved_count_table()) {
|
|
||||||
_card_counts_storage.release();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
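With the table permanently reserved and committed region-by-region through the mapper, the only dynamic state left per card is its saturating hotness counter. A small sketch of that counter logic, with an illustrative limit standing in for the G1ConcRSHotCardLimit flag:

// A sketch, not HotSpot code: the saturating per-card counter.
#include <algorithm>
#include <cstdint>
#include <vector>

static const unsigned kHotCardLimit = 4;  // assumed stand-in for G1ConcRSHotCardLimit

// Returns the pre-increment count; the stored count is clamped at the limit,
// mirroring the MIN2-based update in G1CardCounts::add_card_count.
static unsigned add_card_count(std::vector<uint8_t>& counts, size_t card_num) {
  unsigned count = counts[card_num];
  if (count < kHotCardLimit) {
    counts[card_num] = (uint8_t)std::min<unsigned>(counts[card_num] + 1u, kHotCardLimit);
  }
  return count;
}

static bool is_hot(unsigned count) { return count >= kHotCardLimit; }

int main() {
  std::vector<uint8_t> counts(16, 0);
  unsigned last = 0;
  for (int i = 0; i < 10; i++) last = add_card_count(counts, 5);
  return is_hot(last) ? 0 : 1;  // card 5 saturated at the limit
}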
src/share/vm/gc_implementation/g1/g1CardCounts.hpp

@@ -25,14 +25,26 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP

+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"

 class CardTableModRefBS;
+class G1CardCounts;
 class G1CollectedHeap;
+class G1RegionToSpaceMapper;
 class HeapRegion;

+class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardCounts* _counts;
+ public:
+  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // Table to track the number of times a card has been refined. Once
 // a card has been refined a certain number of times, it is
 // considered 'hot' and its refinement is delayed by inserting the
@@ -41,6 +53,8 @@ class HeapRegion;
 // is 'drained' during the next evacuation pause.

 class G1CardCounts: public CHeapObj<mtGC> {
+  G1CardCountsMappingChangedListener _listener;
+
   G1CollectedHeap* _g1h;

   // The table of counts
@@ -49,27 +63,18 @@ class G1CardCounts: public CHeapObj<mtGC> {
   // Max capacity of the reserved space for the counts table
   size_t _reserved_max_card_num;

-  // Max capacity of the committed space for the counts table
-  size_t _committed_max_card_num;
-
-  // Size of committed space for the counts table
-  size_t _committed_size;
-
   // CardTable bottom.
   const jbyte* _ct_bot;

   // Barrier set
   CardTableModRefBS* _ct_bs;

-  // The virtual memory backing the counts table
-  VirtualSpace _card_counts_storage;
-
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }

   // Returns true if the card counts table has been reserved and committed.
   bool has_count_table() {
-    return has_reserved_count_table() && _committed_max_card_num > 0;
+    return has_reserved_count_table();
   }

   size_t ptr_2_card_num(const jbyte* card_ptr) {
@@ -79,37 +84,24 @@ class G1CardCounts: public CHeapObj<mtGC> {
            "_ct_bot: " PTR_FORMAT,
            p2i(card_ptr), p2i(_ct_bot)));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
     return card_num;
   }

   jbyte* card_num_2_ptr(size_t card_num) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }

-  // Helper routine.
-  // Returns the number of cards that can be counted by the given committed
-  // table size, with a maximum of the number of cards spanned by the max
-  // capacity of the heap.
-  size_t committed_to_card_num(size_t committed_size) {
-    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
-  }
-
   // Clear the counts table for the given (exclusive) index range.
   void clear_range(size_t from_card_num, size_t to_card_num);

  public:
   G1CardCounts(G1CollectedHeap* g1h);
-  ~G1CardCounts();

-  void initialize();
+  void initialize(G1RegionToSpaceMapper* mapper);

-  // Resize the committed space for the card counts table in
-  // response to a resize of the committed space for the heap.
-  void resize(size_t heap_capacity);
-
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
@@ -122,8 +114,10 @@ class G1CardCounts: public CHeapObj<mtGC> {
   // Clears the card counts for the cards spanned by the region
   void clear_region(HeapRegion* hr);

+  // Clears the card counts for the cards spanned by the MemRegion
+  void clear_range(MemRegion mr);
+
   // Clear the entire card counts table during GC.
-  // Updates the policy stats with the duration.
   void clear_all();
 };
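The listener indirection exists because a counts page that is uncommitted and later recommitted can come back with stale bytes; clearing on every commit notification keeps the counts conservative. A toy model of that wiring, with hypothetical names (not the HotSpot classes):

// A sketch only: why on_commit clears the freshly committed range.
#include <cstddef>
#include <cstring>
#include <vector>

struct MappingChangedListener {
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
  virtual ~MappingChangedListener() {}
};

struct CountsListener : MappingChangedListener {
  std::vector<unsigned char>* counts;
  size_t cards_per_region;
  virtual void on_commit(unsigned start_idx, size_t num_regions) {
    // Zero the counter bytes backing the regions that just became accessible.
    std::memset(&(*counts)[start_idx * cards_per_region], 0,
                num_regions * cards_per_region);
  }
};

int main() {
  std::vector<unsigned char> counts(8 * 4, 7);  // pretend stale values survived
  CountsListener l;
  l.counts = &counts;
  l.cards_per_region = 4;
  l.on_commit(2, 2);                 // regions 2 and 3 recommitted
  return counts[2 * 4] == 0 ? 0 : 1; // their counters read as cold again
}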
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -45,6 +45,7 @@
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -381,6 +382,14 @@ void YoungList::print() {
   gclog_or_tty->cr();
 }

+void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
+  OtherRegionsTable::invalidate(start_idx, num_regions);
+}
+
+void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  reset_from_card_cache(start_idx, num_regions);
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 {
   // Claim the right to put the region on the dirty cards region list
@@ -760,13 +769,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   // to know in which list they are on so that we can remove them. We only
   // need to do this if we need to allocate more than one region to satisfy the
   // current humongous allocation request. If we are only allocating one region
-  // we use the one-region region allocation code (see above), or end up here.
+  // we use the one-region region allocation code (see above), that already
+  // potentially waits for regions from the secondary free list.
   wait_while_free_regions_coming();
   append_secondary_free_list_if_not_empty_with_lock();

   // Policy: Try only empty regions (i.e. already committed first). Maybe we
   // are lucky enough to find some.
-  first = _hrs.find_contiguous(obj_regions, true);
+  first = _hrs.find_contiguous_only_empty(obj_regions);
   if (first != G1_NO_HRS_INDEX) {
     _hrs.allocate_free_regions_starting_at(first, obj_regions);
   }
@@ -776,7 +786,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   // Policy: We could not find enough regions for the humongous object in the
   // free list. Look through the heap to find a mix of free and uncommitted regions.
   // If so, try expansion.
-  first = _hrs.find_contiguous(obj_regions, false);
+  first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
   if (first != G1_NO_HRS_INDEX) {
     // We found something. Make sure these regions are committed, i.e. expand
     // the heap. Alternatively we could do a defragmentation GC.
@@ -1954,8 +1964,6 @@ jint G1CollectedHeap::initialize() {
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

-  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -1970,14 +1978,64 @@ jint G1CollectedHeap::initialize() {
   // Carve out the G1 part of the heap.

   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
-  _hrs.initialize(g1_rs);
-
-  assert(_hrs.max_length() == _expansion_regions,
-         err_msg("max length: %u expansion regions: %u",
-                 _hrs.max_length(), _expansion_regions));
-
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
+  G1RegionToSpaceMapper* heap_storage =
+    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         1,
+                                         mtJavaHeap);
+  heap_storage->set_mapping_changed_listener(&_listener);
+
+  // Reserve space for the block offset table. We do not support automatic uncommit
+  // for the card table at this time. BOT only.
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* cardtable_storage =
+    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for the card counts table.
+  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* card_counts_storage =
+    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
+
+  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* prev_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* next_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  g1_barrier_set()->initialize(cardtable_storage);
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init(card_counts_storage);

   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
|
|||||||
|
|
||||||
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
|
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
|
||||||
|
|
||||||
_bot_shared = new G1BlockOffsetSharedArray(_reserved,
|
_bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
|
||||||
heap_word_size(init_byte_size));
|
|
||||||
|
|
||||||
_g1h = this;
|
_g1h = this;
|
||||||
|
|
||||||
@ -2001,7 +2058,7 @@ jint G1CollectedHeap::initialize() {
|
|||||||
|
|
||||||
// Create the ConcurrentMark data structure and thread.
|
// Create the ConcurrentMark data structure and thread.
|
||||||
// (Must do this late, so that "max_regions" is defined.)
|
// (Must do this late, so that "max_regions" is defined.)
|
||||||
_cm = new ConcurrentMark(this, heap_rs);
|
_cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
|
||||||
if (_cm == NULL || !_cm->completed_initialization()) {
|
if (_cm == NULL || !_cm->completed_initialization()) {
|
||||||
vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
|
vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
|
||||||
return JNI_ENOMEM;
|
return JNI_ENOMEM;
|
||||||
@ -2058,8 +2115,8 @@ jint G1CollectedHeap::initialize() {
|
|||||||
|
|
||||||
// Here we allocate the dummy HeapRegion that is required by the
|
// Here we allocate the dummy HeapRegion that is required by the
|
||||||
// G1AllocRegion class.
|
// G1AllocRegion class.
|
||||||
|
|
||||||
HeapRegion* dummy_region = _hrs.get_dummy_region();
|
HeapRegion* dummy_region = _hrs.get_dummy_region();
|
||||||
|
|
||||||
// We'll re-use the same region whether the alloc region will
|
// We'll re-use the same region whether the alloc region will
|
||||||
// require BOT updates or not and, if it doesn't, then a non-young
|
// require BOT updates or not and, if it doesn't, then a non-young
|
||||||
// region will complain that it cannot support allocations without
|
// region will complain that it cannot support allocations without
|
||||||
@ -2480,8 +2537,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool G1CollectedHeap::is_in(const void* p) const {
|
bool G1CollectedHeap::is_in(const void* p) const {
|
||||||
if (_hrs.committed().contains(p)) {
|
if (_hrs.reserved().contains(p)) {
|
||||||
// Given that we know that p is in the committed space,
|
// Given that we know that p is in the reserved space,
|
||||||
// heap_region_containing_raw() should successfully
|
// heap_region_containing_raw() should successfully
|
||||||
// return the containing region.
|
// return the containing region.
|
||||||
HeapRegion* hr = heap_region_containing_raw(p);
|
HeapRegion* hr = heap_region_containing_raw(p);
|
||||||
@ -2491,6 +2548,18 @@ bool G1CollectedHeap::is_in(const void* p) const {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
bool G1CollectedHeap::is_in_exact(const void* p) const {
|
||||||
|
bool contains = reserved_region().contains(p);
|
||||||
|
bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
|
||||||
|
if (contains && available) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
// Iteration functions.
|
// Iteration functions.
|
||||||
|
|
||||||
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
|
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
|
||||||
@ -3368,8 +3437,8 @@ void G1CollectedHeap::print_on(outputStream* st) const {
|
|||||||
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
|
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
|
||||||
capacity()/K, used_unlocked()/K);
|
capacity()/K, used_unlocked()/K);
|
||||||
st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
|
st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
|
||||||
_hrs.committed().start(),
|
_hrs.reserved().start(),
|
||||||
_hrs.committed().end(),
|
_hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
|
||||||
_hrs.reserved().end());
|
_hrs.reserved().end());
|
||||||
st->cr();
|
st->cr();
|
||||||
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
|
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
|
||||||
@ -4121,10 +4190,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
|||||||
// RETIRE events are generated before the end GC event.
|
// RETIRE events are generated before the end GC event.
|
||||||
_hr_printer.end_gc(false /* full */, (size_t) total_collections());
|
_hr_printer.end_gc(false /* full */, (size_t) total_collections());
|
||||||
|
|
||||||
if (mark_in_progress()) {
|
|
||||||
concurrent_mark()->update_heap_boundaries(_hrs.committed());
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef TRACESPINNING
|
#ifdef TRACESPINNING
|
||||||
ParallelTaskTerminator::print_termination_counts();
|
ParallelTaskTerminator::print_termination_counts();
|
||||||
#endif
|
#endif
|
||||||
|
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -206,6 +206,13 @@ public:

 class RefineCardTableEntryClosure;

+class G1RegionMappingChangedListener : public G1MappingChangedListener {
+ private:
+  void reset_from_card_cache(uint start_idx, size_t num_regions);
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
@@ -280,6 +287,9 @@ private:
   // after heap shrinking (free_list_only == true).
   void rebuild_region_sets(bool free_list_only);

+  // Callback for region mapping changed events.
+  G1RegionMappingChangedListener _listener;
+
   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;

@@ -851,11 +861,6 @@ protected:
                              CodeBlobClosure* scan_strong_code,
                              uint worker_i);

-  // Notifies all the necessary spaces that the committed space has
-  // been updated (either expanded or shrunk). It should be called
-  // after _g1_storage is updated.
-  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
-
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -1286,6 +1291,11 @@ public:

   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
+#ifdef ASSERT
+  // Returns whether p is in one of the available areas of the heap. Slow but
+  // extensive version.
+  bool is_in_exact(const void* p) const;
+#endif

   // Return "TRUE" iff the given object address is within the collection
   // set. Slow implementation.
@@ -1355,16 +1365,10 @@ public:
     return _hrs.reserved();
   }

-  // Returns a MemRegion that corresponds to the space that has been
-  // committed in the heap
-  MemRegion g1_committed() {
-    return _hrs.committed();
-  }
-
   virtual bool is_in_closed_subset(const void* p) const;

-  G1SATBCardTableModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableModRefBS*) barrier_set();
+  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
   }

   // This resets the card table to all zeros. It is used after
src/share/vm/gc_implementation/g1/g1HotCardCache.cpp

@@ -33,7 +33,7 @@
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

-void G1HotCardCache::initialize() {
+void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;

@@ -49,7 +49,7 @@ void G1HotCardCache::initialize() {
     _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;

-    _card_counts.initialize();
+    _card_counts.initialize(card_counts_storage);
   }
 }

@@ -135,11 +135,8 @@ void G1HotCardCache::drain(uint worker_i,
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }

-void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
-  _card_counts.resize(heap_capacity);
-}
-
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
   _card_counts.clear_region(hr);
 }

src/share/vm/gc_implementation/g1/g1HotCardCache.hpp

@@ -78,7 +78,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();

-  void initialize();
+  void initialize(G1RegionToSpaceMapper* card_counts_storage);

   bool use_cache() { return _use_cache; }

@@ -115,9 +115,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {

   bool hot_cache_is_empty() { return _n_hot == 0; }

-  // Resizes the card counts table to match the given capacity
-  void resize_card_counts(size_t heap_capacity);
-
   // Zeros the values in the card counts table for entire committed heap
   void reset_card_counts();
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp (new file)

@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "services/memTracker.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+#include "utilities/bitMap.inline.hpp"
+
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
+  _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
+}
+
+bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
+  if (!rs.is_reserved()) {
+    return false;  // Allocation failed.
+  }
+  assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  assert(page_size > 0, "Granularity must be non-zero.");
+
+  _low_boundary  = rs.base();
+  _high_boundary = _low_boundary + rs.size();
+
+  _special = rs.special();
+  _executable = rs.executable();
+
+  _page_size = page_size;
+
+  assert(_committed.size() == 0, "virtual space initialized more than once");
+  uintx size_in_bits = rs.size() / page_size;
+  _committed.resize(size_in_bits, /* in_resource_area */ false);
+
+  if (_special) {
+    _committed.set_range(0, size_in_bits);
+  }
+
+  return true;
+}
+
+G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
+  release();
+}
+
+void G1PageBasedVirtualSpace::release() {
+  // This does not release memory it never reserved.
+  // Caller must release via rs.release();
+  _low_boundary  = NULL;
+  _high_boundary = NULL;
+  _special = false;
+  _executable = false;
+  _page_size = 0;
+  _committed.resize(0, false);
+}
+
+size_t G1PageBasedVirtualSpace::committed_size() const {
+  return _committed.count_one_bits() * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::reserved_size() const {
+  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
+}
+
+size_t G1PageBasedVirtualSpace::uncommitted_size() const {
+  return reserved_size() - committed_size();
+}
+
+uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+  return (addr - _low_boundary) / _page_size;
+}
+
+bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_zero_offset(start, end) >= end;
+}
+
+bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_one_offset(start, end) >= end;
+}
+
+char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+  return _low_boundary + index * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
+  return num * _page_size;
+}
+
+MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+  // We need to make sure to commit all pages covered by the given area.
+  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+
+  if (!_special) {
+    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
+                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+  }
+  _committed.set_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
+  guarantee(is_area_committed(start, size_in_pages), "checking");
+
+  if (!_special) {
+    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+  }
+
+  _committed.clear_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+bool G1PageBasedVirtualSpace::contains(const void* p) const {
+  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
+}
+
+#ifndef PRODUCT
+void G1PageBasedVirtualSpace::print_on(outputStream* out) {
+  out->print ("Virtual space:");
+  if (special()) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
+}
+
+void G1PageBasedVirtualSpace::print() {
+  print_on(tty);
+}
+#endif
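The _committed bitmap is pure bookkeeping: a bit per page records what should currently be committed, so double commits or uncommits trip the guarantees above. A condensed sketch of that bookkeeping, with std::vector<bool> standing in for HotSpot's BitMap (the real class additionally performs the os::commit_memory / os::uncommit_memory calls):

// A sketch only; asserts replace the real guarantee() checks.
#include <cassert>
#include <cstddef>
#include <vector>

class PageCommitMap {
  std::vector<bool> committed_;
 public:
  explicit PageCommitMap(size_t pages) : committed_(pages, false) {}

  bool is_area_uncommitted(size_t start, size_t n) const {
    for (size_t i = start; i < start + n; i++) { if (committed_[i]) return false; }
    return true;
  }
  void commit(size_t start, size_t n) {
    assert(is_area_uncommitted(start, n));  // double commit is a bug
    for (size_t i = start; i < start + n; i++) committed_[i] = true;
  }
  void uncommit(size_t start, size_t n) {
    for (size_t i = start; i < start + n; i++) committed_[i] = false;
  }
};

int main() {
  PageCommitMap m(16);
  m.commit(0, 4);
  m.uncommit(1, 2);
  m.commit(1, 2);  // recommitting is fine once the range is clear again
  return 0;
}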
src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp (new file)

@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/bitMap.hpp"
+
+// Virtual space management helper for a virtual space with an OS page allocation
+// granularity.
+// (De-)Allocation requests are always OS page aligned by passing a page index
+// and multiples of pages.
+// The implementation gives an error when trying to commit or uncommit pages that
+// have already been committed or uncommitted.
+class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  // Reserved area addresses.
+  char* _low_boundary;
+  char* _high_boundary;
+
+  // The commit/uncommit granularity in bytes.
+  size_t _page_size;
+
+  // Bitmap used for verification of commit/uncommit operations.
+  BitMap _committed;
+
+  // Indicates that the entire space has been committed and pinned in memory,
+  // os::commit_memory() or os::uncommit_memory() have no function.
+  bool _special;
+
+  // Indicates whether the committed space should be executable.
+  bool _executable;
+
+  // Returns the index of the page which contains the given address.
+  uintptr_t addr_to_page_index(char* addr) const;
+  // Returns the address of the given page index.
+  char* page_start(uintptr_t index);
+  // Returns the byte size of the given number of pages.
+  size_t byte_size_for_pages(size_t num);
+
+  // Returns true if the entire area is backed by committed memory.
+  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  // Returns true if the entire area is not backed by committed memory.
+  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+
+ public:
+
+  // Commit the given area of pages starting at start being size_in_pages large.
+  MemRegion commit(uintptr_t start, size_t size_in_pages);
+
+  // Uncommit the given area of pages starting at start being size_in_pages large.
+  MemRegion uncommit(uintptr_t start, size_t size_in_pages);
+
+  bool special() const { return _special; }
+
+  // Initialization
+  G1PageBasedVirtualSpace();
+  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+
+  // Destruction
+  ~G1PageBasedVirtualSpace();
+
+  // Amount of reserved memory.
+  size_t reserved_size() const;
+  // Memory used in this virtual space.
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space.
+  size_t uncommitted_size() const;
+
+  bool contains(const void* p) const;
+
+  MemRegion reserved() {
+    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
+    return x;
+  }
+
+  void release();
+
+  void check_for_contiguity() PRODUCT_RETURN;
+
+  // Debugging
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp (new file)

@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
+                                             size_t commit_granularity,
+                                             size_t region_granularity,
+                                             MemoryType type) :
+  _storage(),
+  _commit_granularity(commit_granularity),
+  _region_granularity(region_granularity),
+  _listener(NULL),
+  _commit_map() {
+  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(region_granularity), "must be");
+  _storage.initialize_with_granularity(rs, commit_granularity);
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), type);
+}
+
+// G1RegionToSpaceMapper implementation where the region granularity is larger than
+// or the same as the commit granularity.
+// Basically, the space corresponding to one region spans several OS pages.
+class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  size_t _pages_per_region;
+
+ public:
+  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
+                                      size_t os_commit_granularity,
+                                      size_t alloc_granularity,
+                                      size_t commit_factor,
+                                      MemoryType type) :
+    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+
+    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.set_range(start_idx, start_idx + num_regions);
+    fire_on_commit(start_idx, num_regions);
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.clear_range(start_idx, start_idx + num_regions);
+  }
+};
+
+// G1RegionToSpaceMapper implementation where the region granularity is smaller
+// than the commit granularity.
+// Basically, the contents of one OS page span several regions.
+class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
+   protected:
+    virtual uint default_value() const { return 0; }
+  };
+
+  size_t _regions_per_page;
+
+  CommitRefcountArray _refcounts;
+
+  uintptr_t region_idx_to_page_idx(uint region) const {
+    return region / _regions_per_page;
+  }
+
+ public:
+  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
+                                       size_t os_commit_granularity,
+                                       size_t alloc_granularity,
+                                       size_t commit_factor,
+                                       MemoryType type) :
+    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+
+    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      if (old_refcount == 0) {
+        _storage.commit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount + 1);
+      _commit_map.set_bit(i);
+      fire_on_commit(i, 1);
+    }
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      assert(old_refcount > 0, "must be");
+      if (old_refcount == 1) {
+        _storage.uncommit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount - 1);
+      _commit_map.clear_bit(i);
+    }
+  }
+};
+
+void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
+  if (_listener != NULL) {
+    _listener->on_commit(start_idx, num_regions);
+  }
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
+                                                            size_t os_commit_granularity,
+                                                            size_t region_granularity,
+                                                            size_t commit_factor,
+                                                            MemoryType type) {
+
+  if (region_granularity >= (os_commit_granularity * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  } else {
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  }
+}
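The refcounting in G1RegionsSmallerThanCommitSizeMapper is the subtle part: several regions' table fragments share one OS page, so the page may only be uncommitted when its last user goes away. A condensed model of that invariant, with illustrative names and page commit mocked by a counter:

// A sketch only; the real mapper calls _storage.commit()/_storage.uncommit().
#include <cassert>
#include <cstddef>
#include <vector>

struct RefcountedPages {
  std::vector<unsigned> refcounts;  // one slot per backing page
  size_t regions_per_page;
  size_t pages_committed;

  RefcountedPages(size_t pages, size_t rpp)
    : refcounts(pages, 0), regions_per_page(rpp), pages_committed(0) {}

  void commit_region(size_t region) {
    size_t page = region / regions_per_page;
    if (refcounts[page]++ == 0) pages_committed++;  // first user commits the page
  }
  void uncommit_region(size_t region) {
    size_t page = region / regions_per_page;
    assert(refcounts[page] > 0);
    if (--refcounts[page] == 0) pages_committed--;  // last user releases it
  }
};

int main() {
  RefcountedPages p(4 /* pages */, 2 /* regions per page */);
  p.commit_region(0);
  p.commit_region(1);              // same page: refcount bump, no new commit
  assert(p.pages_committed == 1);
  p.uncommit_region(0);
  assert(p.pages_committed == 1);  // page still in use by region 1
  p.uncommit_region(1);
  assert(p.pages_committed == 0);
  return 0;
}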
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "utilities/debug.hpp"
+
+class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
+ public:
+  // Fired after commit of the memory, i.e. the memory this listener is registered
+  // for can be accessed.
+  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
+};
+
+// Maps region based commit/uncommit requests to the underlying page sized virtual
+// space.
+class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
+ private:
+  G1MappingChangedListener* _listener;
+ protected:
+  // Backing storage.
+  G1PageBasedVirtualSpace _storage;
+  size_t _commit_granularity;
+  size_t _region_granularity;
+  // Mapping management
+  BitMap _commit_map;
+
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+
+  void fire_on_commit(uint start_idx, size_t num_regions);
+ public:
+  MemRegion reserved() { return _storage.reserved(); }
+
+  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
+
+  virtual ~G1RegionToSpaceMapper() {
+    _commit_map.resize(0, /* in_resource_area */ false);
+  }
+
+  bool is_committed(uintptr_t idx) const {
+    return _commit_map.at(idx);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+
+  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The byte_translation_factor defines how many bytes in a region correspond to
+  // a single byte in the data structure this mapper is for.
+  // Eg. in the card table, this value corresponds to the size a single card
+  // table entry corresponds to.
+  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
+                                              size_t os_commit_granularity,
+                                              size_t region_granularity,
+                                              size_t byte_translation_factor,
+                                              MemoryType type);
+};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
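The create_mapper() contract is easier to see with concrete numbers: a data structure gets the per-region ("larger than commit size") mapper only when one region covers at least a whole OS page of it; otherwise the refcounting mapper is used. A sketch under assumed sizes (1 MB regions, 4 KB pages, 512-byte cards are typical values, not read from a running VM):

    #include <cstddef>
    #include <cstdio>

    // Reproduces the decision in G1RegionToSpaceMapper::create_mapper with
    // concrete, assumed numbers.
    int main() {
      const std::size_t page_size   = 4 * 1024;    // os_commit_granularity
      const std::size_t region_size = 1024 * 1024; // region_granularity

      struct Candidate { const char* name; std::size_t translation_factor; };
      const Candidate ds[] = {
        { "heap",       1   },  // the heap maps onto itself 1:1
        { "card table", 512 },  // one card table byte covers 512 heap bytes
      };

      for (const Candidate& d : ds) {
        // Bytes of the data structure that belong to a single region:
        std::size_t bytes_per_region = region_size / d.translation_factor;
        bool per_region = region_size >= page_size * d.translation_factor;
        std::printf("%-10s: %7zu bytes per region -> %s\n",
                    d.name, bytes_per_region,
                    per_region ? "G1RegionsLargerThanCommitSizeMapper"
                               : "G1RegionsSmallerThanCommitSizeMapper");
      }
      return 0;
    }

With these numbers the heap itself owns 1 MB of storage per region (many pages, so pages never straddle regions), while the card table owns only 2 KB per region (half a page), which is exactly the case the refcounting mapper exists for.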
@@ -540,6 +540,12 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
 
 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
+  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
+         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
+                 p2i(card_ptr),
+                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
+                 _ct_bs->addr_for(card_ptr),
+                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
+
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {

@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"

@@ -38,7 +39,6 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
   _kind = G1SATBCT;
 }
 
-
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");

@@ -125,13 +125,52 @@ void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
 }
 #endif
 
+void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
-  _dcqs(JavaThread::dirty_card_queue_set())
+  _dcqs(JavaThread::dirty_card_queue_set()),
+  _listener()
 {
   _kind = G1SATBCTLogging;
+  _listener.set_card_table(this);
+}
+
+void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  if (TraceCardTableModRefBS) {
+    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+    gclog_or_tty->print_cr("  "
+                           "  &_byte_map[0]: " INTPTR_FORMAT
+                           "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                           p2i(&_byte_map[0]),
+                           p2i(&_byte_map[_last_valid_index]));
+    gclog_or_tty->print_cr("  "
+                           "  byte_map_base: " INTPTR_FORMAT,
+                           p2i(byte_map_base));
+  }
 }
 
 void
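The byte_map_base assignment in initialize() above is the standard card table biasing trick: shifting the table base down by (heap_start >> card_shift) lets any heap address find its card entry with one shift and one add. A tiny self-contained check of that arithmetic with an invented heap address; note that forming the biased pointer is formally out of bounds in ISO C++, and both the VM and this sketch rely on a flat address space:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int card_shift = 9;   // 512-byte cards
      static unsigned char byte_map[1024];

      const std::uintptr_t heap_start = 0x80000000u; // hypothetical, card-aligned

      // Biased base: byte_map_base + (addr >> card_shift) == &byte_map[card index]
      unsigned char* byte_map_base = byte_map - (heap_start >> card_shift);

      // An address 5 cards and a bit past the heap start...
      std::uintptr_t addr = heap_start + 5 * 512 + 17;
      // ...lands exactly on entry 5 of the table:
      assert(byte_map_base + (addr >> card_shift) == &byte_map[5]);
      return 0;
    }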
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"

@@ -33,6 +34,7 @@
 #if INCLUDE_ALL_GCS
 
 class DirtyCardQueueSet;
+class G1SATBCardTableLoggingModRefBS;
 
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.

@@ -126,18 +128,40 @@ public:
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
+};
+
+class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
+ private:
+  G1SATBCardTableLoggingModRefBS* _card_table;
+ public:
+  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
+  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
+  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
  public:
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                  int max_covered_regions);
 
+  virtual void initialize() { }
+  virtual void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
            G1SATBCardTableModRefBS::is_a(bsn);

@@ -154,8 +178,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
 
   void write_region_work(MemRegion mr) { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
-
 };
 
 
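The listener plumbing declared here is a small observer pattern: the barrier set owns the listener, points it back at itself, registers it with the mapper, and the mapper calls back after committing memory so the freshly mapped card range can be cleared. A minimal sketch with stand-in types (not the VM's classes):

    #include <cstdio>

    struct MappingChangedListener {
      virtual ~MappingChangedListener() { }
      virtual void on_commit(unsigned start_idx, unsigned num_regions) = 0;
    };

    class CardTable;

    class CardTableListener : public MappingChangedListener {
      CardTable* _card_table;
    public:
      CardTableListener() : _card_table(0) { }
      void set_card_table(CardTable* ct) { _card_table = ct; }
      virtual void on_commit(unsigned start_idx, unsigned num_regions);
    };

    class CardTable {
      CardTableListener _listener;
    public:
      CardTable() { _listener.set_card_table(this); } // back-pointer, as above
      CardTableListener* listener() { return &_listener; }
      void clear(unsigned start_idx, unsigned num_regions) {
        std::printf("clearing cards for regions [%u, %u)\n",
                    start_idx, start_idx + num_regions);
      }
    };

    void CardTableListener::on_commit(unsigned start_idx, unsigned num_regions) {
      _card_table->clear(start_idx, num_regions);
    }

    struct Mapper {
      MappingChangedListener* _listener;
      Mapper() : _listener(0) { }
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(unsigned start_idx, unsigned num_regions) {
        // ...commit the backing pages here, then notify...
        if (_listener != 0) _listener->on_commit(start_idx, num_regions);
      }
    };

    int main() {
      CardTable ct;
      Mapper m;
      m.set_mapping_changed_listener(ct.listener());
      m.commit_regions(3, 2); // fires on_commit, clearing cards for regions 3..4
      return 0;
    }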
@@ -373,17 +373,17 @@ void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
                        _max_regions,
                        &_static_mem_size);
 
-  for (uint i = 0; i < n_par_rs; i++) {
-    for (uint j = 0; j < _max_regions; j++) {
-      set(i, j, InvalidCard);
-    }
-  }
+  invalidate(0, _max_regions);
 }
 
-void FromCardCache::shrink(uint new_num_regions) {
+void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
+  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
+            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
+                    start_idx, new_num_regions));
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_num_regions <= _max_regions, "Must be within max.");
-    for (uint j = new_num_regions; j < _max_regions; j++) {
+    uint end_idx = (start_idx + (uint)new_num_regions);
+    assert(end_idx <= _max_regions, "Must be within max.");
+    for (uint j = start_idx; j < end_idx; j++) {
       set(i, j, InvalidCard);
     }
   }
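The new invalidate(start_idx, num_regions) generalizes the old shrink(): instead of always wiping the tail of the cache, any range of regions can be reset to InvalidCard across every parallel remembered-set row. A compact standalone model of that behavior:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Models the FromCardCache change above: a 2-D cache, one row per
    // parallel rem set, one entry per region, where an arbitrary region
    // range can be invalidated.
    class FromCardCacheModel {
      enum { InvalidCard = -1 };
      std::size_t _max_regions;
      std::vector<std::vector<int>> _cache;

    public:
      FromCardCacheModel(std::size_t n_par_rs, std::size_t max_regions)
        : _max_regions(max_regions),
          _cache(n_par_rs, std::vector<int>(max_regions, InvalidCard)) { }

      void set(std::size_t rs, std::size_t region, int card) { _cache[rs][region] = card; }
      int  get(std::size_t rs, std::size_t region) const     { return _cache[rs][region]; }

      void invalidate(std::size_t start, std::size_t num) {
        assert(start + num <= _max_regions);
        for (std::vector<int>& row : _cache)
          for (std::size_t j = start; j < start + num; j++)
            row[j] = InvalidCard;   // sentinel, as in the real cache
      }
    };

    int main() {
      FromCardCacheModel c(2, 8);
      c.set(0, 3, 42);
      c.set(1, 5, 7);
      c.invalidate(3, 2);        // regions 3 and 4 leave the heap
      assert(c.get(0, 3) == -1); // their cached cards must not survive
      assert(c.get(1, 5) == 7);  // other regions are untouched
      return 0;
    }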
@@ -407,12 +407,12 @@ void FromCardCache::clear(uint region_idx) {
   }
 }
 
-void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+void OtherRegionsTable::initialize(uint max_regions) {
   FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
 }
 
-void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
-  FromCardCache::shrink(new_num_regions);
+void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
+  FromCardCache::invalidate(start_idx, num_regions);
 }
 
 void OtherRegionsTable::print_from_card_cache() {

@@ -841,7 +841,7 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
-    _code_roots(), _other_regions(hr, &_m) {
+    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
 

@@ -84,7 +84,7 @@ class FromCardCache : public AllStatic {
 
   static void initialize(uint n_par_rs, uint max_num_regions);
 
-  static void shrink(uint new_num_regions);
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
 

@@ -213,11 +213,11 @@ public:
 
   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   // (Uses it to initialize from_card_cache).
-  static void init_from_card_cache(uint max_regions);
+  static void initialize(uint max_regions);
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  // Make sure any entries for higher regions are invalid.
-  static void shrink_from_card_cache(uint new_num_regions);
+  // Declares that regions between start_idx <= i < start_idx + num_regions are
+  // not in use. Make sure that any entries for these regions are invalid.
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print_from_card_cache();
 };

@@ -400,12 +400,11 @@ public:
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    OtherRegionsTable::init_from_card_cache(max_regions);
+    OtherRegionsTable::initialize(max_regions);
   }
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(uint new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void invalidate(uint start_idx, uint num_regions) {
+    OtherRegionsTable::invalidate(start_idx, num_regions);
   }
 
 #ifndef PRODUCT
@@ -30,19 +30,33 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "memory/allocation.hpp"
 
-void HeapRegionSeq::initialize(ReservedSpace reserved) {
-  _reserved = reserved;
-  _storage.initialize(reserved, 0);
-
-  _num_committed = 0;
-
+void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
+                               G1RegionToSpaceMapper* prev_bitmap,
+                               G1RegionToSpaceMapper* next_bitmap,
+                               G1RegionToSpaceMapper* bot,
+                               G1RegionToSpaceMapper* cardtable,
+                               G1RegionToSpaceMapper* card_counts) {
   _allocated_heapregions_length = 0;
 
-  _regions.initialize((HeapWord*)_storage.low_boundary(), (HeapWord*)_storage.high_boundary(), HeapRegion::GrainBytes);
+  _heap_mapper = heap_storage;
+
+  _prev_bitmap_mapper = prev_bitmap;
+  _next_bitmap_mapper = next_bitmap;
+
+  _bot_mapper = bot;
+  _cardtable_mapper = cardtable;
+
+  _card_counts_mapper = card_counts;
+
+  MemRegion reserved = heap_storage->reserved();
+  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
+
+  _available_map.resize(_regions.length(), false);
+  _available_map.clear();
 }
 
 bool HeapRegionSeq::is_available(uint region) const {
-  return region < _num_committed;
+  return _available_map.at(region);
 }
 
 #ifdef ASSERT
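is_available() switching from an index comparison to a bitmap lookup is what permits holes in the committed heap: committed regions no longer have to form a prefix. A minimal model of the difference:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Models the switch above: availability used to mean "index below
    // _num_committed" (a contiguous committed prefix); it is now a bit per
    // region, so arbitrary ranges can be committed and uncommitted.
    struct RegionAvailability {
      std::vector<bool> available;

      explicit RegionAvailability(std::size_t max_regions)
        : available(max_regions, false) { }

      void make_available(std::size_t start, std::size_t num) {
        for (std::size_t i = start; i < start + num; i++) available[i] = true;
      }
      void make_unavailable(std::size_t start, std::size_t num) {
        for (std::size_t i = start; i < start + num; i++) available[i] = false;
      }
      bool is_available(std::size_t region) const { return available[region]; }
    };

    int main() {
      RegionAvailability map(10);
      map.make_available(0, 3);
      map.make_available(6, 2);   // a hole at regions 3..5 is now legal
      assert(map.is_available(7));
      assert(!map.is_available(4));
      return 0;
    }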
@@ -58,29 +72,26 @@ HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
   return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
 }
 
-void HeapRegionSeq::update_committed_space(HeapWord* old_end,
-                                           HeapWord* new_end) {
-  assert(old_end != new_end, "don't call this otherwise");
-  // We may not have officially committed the area. So construct and use a separate one.
-  MemRegion new_committed(heap_bottom(), new_end);
-  // Tell the card table about the update.
-  Universe::heap()->barrier_set()->resize_covered_region(new_committed);
-  // Tell the BOT about the update.
-  G1CollectedHeap::heap()->bot_shared()->resize(new_committed.word_size());
-  // Tell the hot card cache about the update
-  G1CollectedHeap::heap()->concurrent_g1_refine()->hot_card_cache()->resize_card_counts(new_committed.byte_size());
-}
-
 void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
   guarantee(num_regions > 0, "Must commit more than zero regions");
   guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
 
-  _storage.expand_by(num_regions * HeapRegion::GrainBytes);
-  update_committed_space(heap_top(), heap_top() + num_regions * HeapRegion::GrainWords);
+  _num_committed += (uint)num_regions;
+
+  _heap_mapper->commit_regions(index, num_regions);
+
+  // Also commit auxiliary data
+  _prev_bitmap_mapper->commit_regions(index, num_regions);
+  _next_bitmap_mapper->commit_regions(index, num_regions);
+
+  _bot_mapper->commit_regions(index, num_regions);
+  _cardtable_mapper->commit_regions(index, num_regions);
+
+  _card_counts_mapper->commit_regions(index, num_regions);
 }
 
 void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
-  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit");
+  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
   guarantee(_num_committed >= num_regions, "pre-condition");
 
   // Print before uncommitting.

@@ -91,12 +102,19 @@ void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
     }
   }
 
-  HeapWord* old_end = heap_top();
   _num_committed -= (uint)num_regions;
-  OrderAccess::fence();
 
-  _storage.shrink_by(num_regions * HeapRegion::GrainBytes);
-  update_committed_space(old_end, heap_top());
+  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
+  _heap_mapper->uncommit_regions(start, num_regions);
+
+  // Also uncommit auxiliary data
+  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  _next_bitmap_mapper->uncommit_regions(start, num_regions);
+
+  _bot_mapper->uncommit_regions(start, num_regions);
+  _cardtable_mapper->uncommit_regions(start, num_regions);
+
+  _card_counts_mapper->uncommit_regions(start, num_regions);
 }
 
 void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
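commit_regions() and uncommit_regions() now fan a single region range out to the heap mapper and every auxiliary mapper, replacing the old resize-everything-to-the-new-end logic. A schematic model of that fan-out (hypothetical Mapper type; prints stand in for real commits):

    #include <cstdio>

    struct Mapper {
      const char* name;
      void commit(unsigned start, unsigned num)   { std::printf("  commit   %-11s [%u, %u)\n", name, start, start + num); }
      void uncommit(unsigned start, unsigned num) { std::printf("  uncommit %-11s [%u, %u)\n", name, start, start + num); }
    };

    struct RegionManager {
      Mapper heap{"heap"}, prev_bitmap{"prev bitmap"}, next_bitmap{"next bitmap"},
             bot{"BOT"}, card_table{"card table"}, card_counts{"card counts"};

      void commit_regions(unsigned start, unsigned num) {
        heap.commit(start, num);
        // Auxiliary data is committed for exactly the same region range, so a
        // region is only usable once all of its metadata has backing memory.
        prev_bitmap.commit(start, num);
        next_bitmap.commit(start, num);
        bot.commit(start, num);
        card_table.commit(start, num);
        card_counts.commit(start, num);
      }

      void uncommit_regions(unsigned start, unsigned num) {
        // The real code clears the availability bits first, so nothing can
        // hand out a region whose backing memory is about to disappear.
        heap.uncommit(start, num);
        prev_bitmap.uncommit(start, num);
        next_bitmap.uncommit(start, num);
        bot.uncommit(start, num);
        card_table.uncommit(start, num);
        card_counts.uncommit(start, num);
      }
    };

    int main() {
      RegionManager mgr;
      mgr.commit_regions(4, 3);   // regions 4..6 and all their metadata
      mgr.uncommit_regions(4, 3);
      return 0;
    }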
@@ -110,9 +128,7 @@ void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
     }
   }
 
-  _num_committed += (size_t)num_regions;
-
-  OrderAccess::fence();
+  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
 
   for (uint i = start; i < start + num_regions; i++) {
     assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));

@@ -129,8 +145,7 @@ void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
 }
 
 uint HeapRegionSeq::expand_by(uint num_regions) {
-  // Only ever expand from the end of the heap.
-  return expand_at(_num_committed, num_regions);
+  return expand_at(0, num_regions);
 }
 
 uint HeapRegionSeq::expand_at(uint start, uint num_regions) {

@@ -334,7 +349,8 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
   uint idx_last_found = 0;
   uint num_last_found = 0;
 
-  if ((num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+  while ((removed < num_regions_to_remove) &&
+         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
     // Only allow uncommit from the end of the heap.
     if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
       return 0;
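Turning the if into a while lets shrink_by() peel off several empty runs from the top of the heap in one call, stopping as soon as a run is not flush with the end. A loose standalone model of that loop's intent (not the VM's actual search code):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Walk backwards from the top of the heap, taking maximal runs of empty
    // regions, and stop at the first region still in use, since regions may
    // only be uncommitted from the top.
    int main() {
      // true = empty region, false = in use; index 0 is the bottom of the heap.
      std::vector<bool> empty = { false, true, true, false, true, true, true };
      std::size_t to_remove = 5, removed = 0;
      std::size_t top = empty.size(); // regions below 'top' are still allocated

      while (removed < to_remove && top > 0) {
        // Length of the empty run ending exactly at 'top'.
        std::size_t run = 0;
        while (run < top && empty[top - 1 - run]) run++;
        if (run == 0) break;        // topmost region in use: cannot shrink past it
        std::size_t take = std::min(run, to_remove - removed);
        top -= take;
        removed += take;
      }
      std::printf("uncommitted %zu regions, heap now %zu regions\n", removed, top);
      return 0;
    }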
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
 
 class HeapRegion;

@@ -37,13 +38,17 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
   virtual HeapRegion* default_value() const { return NULL; }
 };
 
-// This class keeps track of the region metadata (i.e., HeapRegion
-// instances). They are kept in the _regions array in address
-// order. A region's index in the array corresponds to its index in
-// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
-// the one after it, etc.). Two regions that are consecutive in the
-// array should also be adjacent in the address space (i.e.,
-// region(i).end() == region(i+1).bottom().
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom().
 //
 // We create a HeapRegion when we commit the region's address space
 // for the first time. When we uncommit the address space of a

@@ -52,32 +57,38 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
 //
 // We keep track of three lengths:
 //
-// * _committed_length (returned by length()) is the number of currently
-//   committed regions.
-// * _allocated_length (not exposed outside this class) is the
-//   number of regions for which we have HeapRegions.
+// * _num_committed (returned by length()) is the number of currently
+//   committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+//   number of regions+1 for which we have HeapRegions.
 // * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _committed_length <= _allocated_length <= max_length()
-
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   G1HeapRegionTable _regions;
 
-  ReservedSpace _reserved;
-  VirtualSpace _storage;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  G1RegionToSpaceMapper* _bot_mapper;
+  G1RegionToSpaceMapper* _cardtable_mapper;
+  G1RegionToSpaceMapper* _card_counts_mapper;
 
   FreeRegionList _free_list;
 
-  // The number of regions committed in the heap.
+  // Each bit in this bitmap indicates that the corresponding region is available
+  // for allocation.
+  BitMap _available_map;
+
+  // The number of regions committed in the heap.
   uint _num_committed;
 
   // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
   uint _allocated_heapregions_length;
 
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
-  HeapWord* heap_top() const { return heap_bottom() + _num_committed * HeapRegion::GrainWords; }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
   void make_regions_available(uint index, uint num_regions = 1);

@@ -92,6 +103,11 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
   // that they do not all start from the same region.
   uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
 
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
+  // If only_empty is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool only_empty);
   // Finds the next sequence of unavailable regions starting from start_idx. Returns the
   // length of the sequence found. If this result is zero, no such sequence could be found,
   // otherwise res_idx indicates the start index of these regions.

@@ -100,6 +116,8 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrs_index);
 #ifdef ASSERT
 public:
   bool is_free(HeapRegion* hr) const;

@@ -107,16 +125,20 @@ public:
   // Returns whether the given region is available for allocation.
   bool is_available(uint region) const;
 
-  // Allocate a new HeapRegion for the given index.
-  HeapRegion* new_heap_region(uint hrs_index);
 public:
   // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() : _regions(), _reserved(), _storage(), _num_committed(0),
-                    _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
-                    _allocated_heapregions_length(0)
+  HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
+                    _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
+                    _allocated_heapregions_length(0), _available_map(),
+                    _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
   { }
 
-  void initialize(ReservedSpace reserved);
+  void initialize(G1RegionToSpaceMapper* heap_storage,
+                  G1RegionToSpaceMapper* prev_bitmap,
+                  G1RegionToSpaceMapper* next_bitmap,
+                  G1RegionToSpaceMapper* bot,
+                  G1RegionToSpaceMapper* cardtable,
+                  G1RegionToSpaceMapper* card_counts);
 
   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit

@@ -175,8 +197,6 @@ public:
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
-  MemRegion committed() const { return MemRegion(heap_bottom(), heap_top()); }
-
   MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
 
   // Expand the sequence to reflect that the heap has grown. Either create new

@@ -190,11 +210,12 @@ public:
   // this.
   uint expand_at(uint start, uint num_regions);
 
-  // Find a contiguous set of empty or uncommitted regions of length num and return
-  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
-  // If only_empty is true, only empty regions are considered.
-  // Searches from bottom to top of the heap, doing a first-fit.
-  uint find_contiguous(size_t num, bool only_empty);
+  // Find a contiguous set of empty regions of length num. Returns the start index of
+  // that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  // Find a contiguous set of empty or unavailable regions of length num. Returns the
+  // start index of that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 

@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   assert(addr < heap_end(),

@@ -35,7 +36,6 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
 
   HeapRegion* hr = _regions.get_by_address(addr);
-  assert(hr != NULL, "invariant");
   return hr;
 }
 
@@ -403,3 +403,41 @@ void HumongousRegionSetMtSafeChecker::check() {
         "master humongous set MT safety protocol outside a safepoint");
   }
 }
+
+void FreeRegionList_test() {
+  FreeRegionList l("test");
+
+  const uint num_regions_in_test = 5;
+  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
+  // does not access it.
+  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
+  // Allocate a fake BOT because the HeapRegion constructor initializes
+  // the BOT.
+  size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
+  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+  G1BlockOffsetSharedArray oa(heap, bot_storage);
+  bot_storage->commit_regions(0, num_regions_in_test);
+  HeapRegion hr0(0, &oa, heap);
+  HeapRegion hr1(1, &oa, heap);
+  HeapRegion hr2(2, &oa, heap);
+  HeapRegion hr3(3, &oa, heap);
+  HeapRegion hr4(4, &oa, heap);
+  l.add_ordered(&hr1);
+  l.add_ordered(&hr0);
+  l.add_ordered(&hr3);
+  l.add_ordered(&hr4);
+  l.add_ordered(&hr2);
+  assert(l.length() == num_regions_in_test, "wrong length");
+  l.verify_list();
+
+  bot_storage->uncommit_regions(0, num_regions_in_test);
+  delete bot_storage;
+  FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
+}
@@ -78,6 +78,7 @@ jint ParallelScavengeHeap::initialize() {
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
@@ -44,13 +44,6 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,

@@ -64,27 +57,50 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                      int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
-  HeapWord* low_bound = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
+  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
 
   assert(card_size <= 512, "card_size must be less than 512"); // why?
 
-  _covered = new MemRegion[max_covered_regions];
-  _committed = new MemRegion[max_covered_regions];
-  if (_covered == NULL || _committed == NULL) {
-    vm_exit_during_initialization("couldn't alloc card table covered region set.");
+  _covered = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
   }
+}
+
+void CardTableModRefBS::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
+  HeapWord* low_bound = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
 
   _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
+  }
+
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
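The refactoring above splits construction into two phases: the constructor only records what it can without reserving memory, and a virtual initialize() acquires the backing store, so a subclass (such as G1's logging barrier set, shown earlier) can override it and substitute mapper-backed storage. A sketch of that pattern with invented class names:

    #include <cstddef>
    #include <cstdio>

    class Table {
    protected:
      std::size_t _covered_bytes;
      unsigned char* _byte_map;

    public:
      explicit Table(std::size_t covered_bytes)
        : _covered_bytes(covered_bytes), _byte_map(0) { } // no allocation yet

      virtual ~Table() { delete[] _byte_map; }

      // Phase two, overridable: the default variant allocates for itself.
      virtual void initialize() {
        _byte_map = new unsigned char[_covered_bytes / 512 + 1];
        std::printf("self-allocated table\n");
      }
    };

    class ExternallyBackedTable : public Table {
      unsigned char* _external; // storage committed by someone else (a "mapper")
    public:
      ExternallyBackedTable(std::size_t covered_bytes, unsigned char* external)
        : Table(covered_bytes), _external(external) { }
      ~ExternallyBackedTable() { _byte_map = 0; } // not ours to delete

      virtual void initialize() {
        _byte_map = _external;                    // adopt the external mapping
        std::printf("using externally mapped table\n");
      }
    };

    int main() {
      static unsigned char backing[4096];
      ExternallyBackedTable t(1024 * 1024, backing);
      t.initialize(); // storage choice deferred until after construction
      return 0;
    }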
@@ -114,20 +130,20 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                        !ExecMem, "card table last card");
   *guard_card = last_card;
 
   _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
       || _last_LNC_resizing_collection == NULL)
     vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < max_covered_regions; i++) {
+  for (int i = 0; i < _max_covered_regions; i++) {
     _lowest_non_clean[i] = NULL;
     _lowest_non_clean_chunk_size[i] = 0;
     _last_LNC_resizing_collection[i] = -1;

@@ -650,7 +666,7 @@ void CardTableModRefBS::verify_region(MemRegion mr,
                                       jbyte val, bool val_equals) {
   jbyte* start = byte_for(mr.start());
   jbyte* end   = byte_for(mr.last());
   bool failures = false;
   for (jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
@@ -96,12 +96,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // The declaration order of these const fields is important; see the
   // constructor before changing.
   const MemRegion _whole_heap;       // the region covered by the card table
-  const size_t    _guard_index;      // index of very last element in the card
+  size_t          _guard_index;      // index of very last element in the card
                                      // table; it is set to a guard value
                                      // (last_card) and should never be modified
-  const size_t    _last_valid_index; // index of the last valid element
+  size_t          _last_valid_index; // index of the last valid element
   const size_t    _page_size;        // page size used when mapping _byte_map
-  const size_t    _byte_map_size;    // in bytes
+  size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
   int _cur_covered_regions;

@@ -123,7 +123,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
 protected:
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
-  inline size_t cards_required(size_t covered_words);
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_size_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
   inline size_t compute_byte_map_size();
 
   // Finds and return the index of the region, if any, to which the given
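The now-inlined cards_required() is easy to sanity-check by hand: round the covered words up to whole cards, then add the guard card. A short standalone check, assuming 512-byte cards and 8-byte words (consistent with the card_size assert in the .cpp file):

    #include <cassert>
    #include <cstddef>

    static std::size_t align_up(std::size_t v, std::size_t unit) {
      return (v + unit - 1) / unit * unit;
    }

    static std::size_t cards_required(std::size_t covered_words,
                                      std::size_t card_size_in_words) {
      const std::size_t words = align_up(covered_words, card_size_in_words);
      return words / card_size_in_words + 1; // +1 for the guard card
    }

    int main() {
      const std::size_t card_words = 64;              // 512-byte cards, 8-byte words
      assert(cards_required(64,  card_words) == 2);   // exactly one card + guard
      assert(cards_required(65,  card_words) == 3);   // rounds up to two cards + guard
      assert(cards_required(128, card_words) == 3);   // two cards + guard
      return 0;
    }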
@@ -137,7 +142,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
   int find_covering_region_containing(HeapWord* addr);
 
   // Resize one of the regions covered by the remembered set.
-  void resize_covered_region(MemRegion new_region);
+  virtual void resize_covered_region(MemRegion new_region);
 
   // Returns the leftmost end of a committed region corresponding to a
   // covered region before covered region "ind", or else "NULL" if "ind" is

@@ -282,6 +287,8 @@ public:
   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   ~CardTableModRefBS();
 
+  virtual void initialize();
+
   // *** Barrier set functions.
 
   bool has_write_ref_pre_barrier() { return false; }

@@ -54,6 +54,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
 #else
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
 #endif
+  _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

@@ -3865,6 +3865,7 @@ void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
 void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
+void FreeRegionList_test();
 #endif
 
 void execute_internal_vm_tests() {

@@ -3900,6 +3901,9 @@ void execute_internal_vm_tests() {
     run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
    run_unit_test(TestCodeCacheRemSet_test());
+    if (UseG1GC) {
+      run_unit_test(FreeRegionList_test());
+    }
 #endif
     tty->print_cr("All internal VM tests passed");
 }