Merge

commit 0fc1aa3a2b
@@ -1157,6 +1157,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   } else {
     // We're done with marking.
     JavaThread::satb_mark_queue_set().set_active_all_threads(false);
+
+    if (VerifyDuringGC) {
+      g1h->prepare_for_verify();
+      g1h->verify(/* allow_dirty */ true,
+                  /* silent */ false,
+                  /* use_prev_marking */ false);
+    }
   }
 
 #if VERIFY_OBJS_PROCESSED
@@ -1747,12 +1754,12 @@ void ConcurrentMark::cleanup() {
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
 
 #ifndef PRODUCT
   if (VerifyDuringGC) {
-    G1CollectedHeap::heap()->prepare_for_verify();
-    G1CollectedHeap::heap()->verify(true,false);
+    g1h->prepare_for_verify();
+    g1h->verify(/* allow_dirty */ true,
+                /* silent */ false,
+                /* use_prev_marking */ true);
   }
 #endif
 }
 
 void ConcurrentMark::completeCleanup() {
@@ -1535,6 +1535,15 @@ jint G1CollectedHeap::initialize() {
   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
   guarantee(_cur_alloc_region == NULL, "from constructor");
 
+  // 6843694 - ensure that the maximum region index can fit
+  // in the remembered set structures.
+  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
+  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
+
+  const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
+  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
+  guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
+
   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                              heap_word_size(init_byte_size));
 
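Note (added for orientation; not commit text): the two guarantees above size-check the remembered-set index types against the heap geometry. The expression ((size_t)1 << (sizeof(T)*BitsPerByte-1)) - 1 is the largest positive value of a signed type T. A minimal standalone sketch of that arithmetic, with RegionIdx_t assumed to be int as in the typedef this commit adds:

#include <cassert>
#include <climits>
#include <cstddef>

typedef int RegionIdx_t;    // matches the typedef added in g1CollectedHeap.hpp below
const int BitsPerByte = 8;  // HotSpot defines this constant in globalDefinitions.hpp

int main() {
  // One bit of the signed index type is the sign bit, hence the "- 1" in the shift.
  const size_t max_region_idx =
      ((size_t)1 << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;
  assert(max_region_idx == (size_t)INT_MAX);  // 2147483647 for a 32-bit int
  return 0;
}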
@@ -2127,17 +2136,22 @@ public:
 };
 
 class VerifyObjsInRegionClosure: public ObjectClosure {
 private:
   G1CollectedHeap* _g1h;
   size_t _live_bytes;
   HeapRegion *_hr;
+  bool _use_prev_marking;
 public:
-  VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
+    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
     _g1h = G1CollectedHeap::heap();
   }
   void do_object(oop o) {
     VerifyLivenessOopClosure isLive(_g1h);
     assert(o != NULL, "Huh?");
-    if (!_g1h->is_obj_dead(o)) {
+    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
       o->oop_iterate(&isLive);
       if (!_hr->obj_allocated_since_prev_marking(o))
         _live_bytes += (o->size() * HeapWordSize);
@@ -2176,17 +2190,22 @@ public:
 };
 
 class VerifyRegionClosure: public HeapRegionClosure {
-public:
+private:
   bool _allow_dirty;
   bool _par;
-  VerifyRegionClosure(bool allow_dirty, bool par = false)
-    : _allow_dirty(allow_dirty), _par(par) {}
+  bool _use_prev_marking;
+public:
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
+    : _allow_dirty(allow_dirty), _par(par),
+      _use_prev_marking(use_prev_marking) {}
   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
     if (!r->continuesHumongous()) {
-      VerifyObjsInRegionClosure not_dead_yet_cl(r);
-      r->verify(_allow_dirty);
+      VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
+      r->verify(_allow_dirty, _use_prev_marking);
       r->object_iterate(&not_dead_yet_cl);
       guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
                 "More live objects than counted in last complete marking.");
@@ -2199,10 +2218,13 @@ class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
   bool             _failures;
 
+  bool             _use_prev_marking;
 public:
-  VerifyRootsClosure() :
-    _g1h(G1CollectedHeap::heap()), _failures(false) { }
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  VerifyRootsClosure(bool use_prev_marking) :
+    _g1h(G1CollectedHeap::heap()), _failures(false),
+    _use_prev_marking(use_prev_marking) { }
 
   bool failures() { return _failures; }
 
@@ -2213,7 +2235,7 @@ public:
   void do_oop(oop* p) {
     oop obj = *p;
     if (obj != NULL) {
-      if (_g1h->is_obj_dead(obj)) {
+      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
         obj->print_on(gclog_or_tty);
@@ -2229,24 +2251,35 @@ class G1ParVerifyTask: public AbstractGangTask {
 private:
   G1CollectedHeap* _g1h;
   bool             _allow_dirty;
+  bool             _use_prev_marking;
 
 public:
-  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
+                  bool use_prev_marking) :
     AbstractGangTask("Parallel verify task"),
-    _g1h(g1h), _allow_dirty(allow_dirty) { }
+    _g1h(g1h), _allow_dirty(allow_dirty),
+    _use_prev_marking(use_prev_marking) { }
 
   void work(int worker_i) {
     HandleMark hm;
-    VerifyRegionClosure blk(_allow_dirty, true);
+    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                           HeapRegion::ParVerifyClaimValue);
   }
 };
 
 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
+  verify(allow_dirty, silent, /* use_prev_marking */ true);
+}
+
+void G1CollectedHeap::verify(bool allow_dirty,
+                             bool silent,
+                             bool use_prev_marking) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     if (!silent) { gclog_or_tty->print("roots "); }
-    VerifyRootsClosure rootsCl;
+    VerifyRootsClosure rootsCl(use_prev_marking);
     process_strong_roots(false,
                          SharedHeap::SO_AllClasses,
                          &rootsCl,
@@ -2257,7 +2290,7 @@ void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
 
-      G1ParVerifyTask task(this, allow_dirty);
+      G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
       int n_workers = workers()->total_workers();
       set_par_threads(n_workers);
       workers()->run_task(&task);
@@ -2271,7 +2304,7 @@ void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
            "sanity check");
   } else {
-    VerifyRegionClosure blk(allow_dirty);
+    VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
     _hrs->iterate(&blk);
   }
   if (!silent) gclog_or_tty->print("remset ");
@@ -59,6 +59,9 @@ class ConcurrentZFThread;
 typedef GenericTaskQueue<oop*> RefToScanQueue;
 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
 
+typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
+typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
+
 enum G1GCThreadGroups {
   G1CRGroup = 0,
   G1ZFGroup = 1,
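Note (my gloss on the motivation, not commit text): these typedefs replace the bare short indices used throughout the remembered-set code below. A signed short tops out at 32767, which the region or card count of a large heap can exceed. A tiny demonstration of the failure mode, with hypothetical numbers:

#include <cassert>

int main() {
  // Hypothetical: 1 MB regions on a 64 GB heap give 65536 regions,
  // more than a signed short can index.
  const long num_regions = 64L * 1024;
  short idx = (short)(num_regions - 1);  // silently truncates (to -1 on two's complement)
  assert((long)idx != num_regions - 1);  // the index no longer round-trips
  return 0;
}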
@@ -1046,6 +1049,17 @@ public:
   virtual void prepare_for_verify();
 
+  // Perform verification.
+
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  // NOTE: Only the "prev" marking information is guaranteed to be
+  // consistent most of the time, so most calls to this should use
+  // use_prev_marking == true. Currently, there is only one case where
+  // this is called with use_prev_marking == false, which is to verify
+  // the "next" marking information at the end of remark.
+  void verify(bool allow_dirty, bool silent, bool use_prev_marking);
+
+  // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty, bool silent);
   virtual void print() const;
   virtual void print_on(outputStream* st) const;
@@ -1122,6 +1136,18 @@ public:
   bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;
 
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  bool is_obj_dead_cond(const oop obj,
+                        const HeapRegion* hr,
+                        const bool use_prev_marking) const {
+    if (use_prev_marking) {
+      return is_obj_dead(obj, hr);
+    } else {
+      return is_obj_ill(obj, hr);
+    }
+  }
+
   // Determine if an object is dead, given the object and also
   // the region to which the object belongs. An object is dead
   // iff a) it was not allocated since the last mark and b) it
@@ -1159,8 +1185,19 @@ public:
   // Added if it is in permanent gen it isn't dead.
   // Added if it is NULL it isn't dead.
 
-  bool is_obj_dead(oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  bool is_obj_dead_cond(const oop obj,
+                        const bool use_prev_marking) {
+    if (use_prev_marking) {
+      return is_obj_dead(obj);
+    } else {
+      return is_obj_ill(obj);
+    }
+  }
+
+  bool is_obj_dead(const oop obj) {
+    const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
         return false;
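Background for the pair of is_obj_dead_cond overloads (my summary, not commit text): G1 keeps "prev" marking information, complete as of the last finished marking, and "next" information still being built by the in-progress marking; is_obj_dead consults the former and is_obj_ill the latter, and the new helpers simply dispatch between them. A minimal standalone sketch of that dispatch, with hypothetical stand-in names:

#include <cassert>

// Illustrative stand-in for G1CollectedHeap; the two flags stand in for
// lookups against the prev/next marking bitmaps.
struct MiniHeap {
  bool dead_in_prev;  // would be computed from the "prev" bitmap
  bool dead_in_next;  // would be computed from the "next" bitmap

  bool is_obj_dead() const { return dead_in_prev; }
  bool is_obj_ill()  const { return dead_in_next; }

  // Mirrors the is_obj_dead_cond() dispatch added in this commit.
  bool is_obj_dead_cond(bool use_prev_marking) const {
    return use_prev_marking ? is_obj_dead() : is_obj_ill();
  }
};

int main() {
  MiniHeap h = { /*dead_in_prev*/ false, /*dead_in_next*/ true };
  assert(!h.is_obj_dead_cond(true));  // normal verification path
  assert(h.is_obj_dead_cond(false));  // end-of-remark verification path
  return 0;
}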
@@ -1170,8 +1207,8 @@ public:
     else return is_obj_dead(obj, hr);
   }
 
-  bool is_obj_ill(oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
+  bool is_obj_ill(const oop obj) {
+    const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
         return false;
@@ -40,15 +40,19 @@ FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
 {}
 
 class VerifyLiveClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   CardTableModRefBS* _bs;
   oop _containing_obj;
   bool _failures;
   int _n_failures;
+  bool _use_prev_marking;
 public:
-  VerifyLiveClosure(G1CollectedHeap* g1h) :
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
     _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0)
+    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
   {
     BarrierSet* bs = _g1h->barrier_set();
     if (bs->is_a(BarrierSet::CardTableModRef))
@@ -68,11 +72,13 @@ public:
 
   void do_oop(oop* p) {
     assert(_containing_obj != NULL, "Precondition");
-    assert(!_g1h->is_obj_dead(_containing_obj), "Precondition");
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
+           "Precondition");
     oop obj = *p;
     if (obj != NULL) {
       bool failed = false;
-      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead(obj)) {
+      if (!_g1h->is_in_closed_subset(obj) ||
+          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         if (!_failures) {
           gclog_or_tty->print_cr("");
           gclog_or_tty->print_cr("----------");
@@ -647,19 +653,23 @@ void HeapRegion::print_on(outputStream* st) const {
   G1OffsetTableContigSpace::print_on(st);
 }
 
+void HeapRegion::verify(bool allow_dirty) const {
+  verify(allow_dirty, /* use_prev_marking */ true);
+}
+
 #define OBJ_SAMPLE_INTERVAL 0
 #define BLOCK_SAMPLE_INTERVAL 100
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
-void HeapRegion::verify(bool allow_dirty) const {
+void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
   int objs = 0;
   int blocks = 0;
-  VerifyLiveClosure vl_cl(g1);
+  VerifyLiveClosure vl_cl(g1, use_prev_marking);
   while (p < top()) {
     size_t size = oop(p)->size();
     if (blocks == BLOCK_SAMPLE_INTERVAL) {
@@ -671,7 +681,7 @@ void HeapRegion::verify(bool allow_dirty) const {
     }
     if (objs == OBJ_SAMPLE_INTERVAL) {
       oop obj = oop(p);
-      if (!g1->is_obj_dead(obj, this)) {
+      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
         obj->verify();
         vl_cl.set_containing_obj(obj);
         obj->oop_iterate(&vl_cl);
@@ -782,7 +782,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void print() const;
   void print_on(outputStream* st) const;
 
-  // Override
+  // use_prev_marking == true -> use "prev" marking information,
+  // use_prev_marking == false -> use "next" marking information
+  // NOTE: Only the "prev" marking information is guaranteed to be
+  // consistent most of the time, so most calls to this should use
+  // use_prev_marking == true. Currently, there is only one case where
+  // this is called with use_prev_marking == false, which is to verify
+  // the "next" marking information at the end of remark.
+  void verify(bool allow_dirty, bool use_prev_marking) const;
+
+  // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
 
 #ifdef DEBUG
@@ -109,7 +109,7 @@ protected:
     return new PerRegionTable(hr);
   }
 
-  void add_card_work(short from_card, bool par) {
+  void add_card_work(CardIdx_t from_card, bool par) {
     if (!_bm.at(from_card)) {
       if (par) {
         if (_bm.par_at_put(from_card, 1)) {
@@ -141,11 +141,11 @@ protected:
   // and adding a bit to the new table is never incorrect.
   if (loc_hr->is_in_reserved(from)) {
     size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
-    size_t from_card =
-      hw_offset >>
-      (CardTableModRefBS::card_shift - LogHeapWordSize);
+    CardIdx_t from_card = (CardIdx_t)
+        hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 
-    add_card_work((short) from_card, par);
+    assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
+    add_card_work(from_card, par);
   }
 }
 
@@ -190,11 +190,11 @@ public:
 #endif
   }
 
-  void add_card(short from_card_index) {
+  void add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ true);
   }
 
-  void seq_add_card(short from_card_index) {
+  void seq_add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ false);
   }
 
@@ -604,7 +604,7 @@ void OtherRegionsTable::add_reference(oop* from, int tid) {
 
   // Note that this may be a continued H region.
   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
-  size_t from_hrs_ind = (size_t)from_hr->hrs_index();
+  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
 
   // If the region is already coarsened, return.
   if (_coarse_map.at(from_hrs_ind)) {
@@ -627,11 +627,11 @@ void OtherRegionsTable::add_reference(oop* from, int tid) {
     uintptr_t from_hr_bot_card_index =
       uintptr_t(from_hr->bottom())
         >> CardTableModRefBS::card_shift;
-    int card_index = from_card - from_hr_bot_card_index;
+    CardIdx_t card_index = from_card - from_hr_bot_card_index;
     assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
            "Must be in range.");
     if (G1HRRSUseSparseTable &&
-        _sparse_table.add_card((short) from_hrs_ind, card_index)) {
+        _sparse_table.add_card(from_hrs_ind, card_index)) {
       if (G1RecordHRRSOops) {
         HeapRegionRemSet::record(hr(), from);
 #if HRRS_VERBOSE
@@ -656,9 +656,9 @@ void OtherRegionsTable::add_reference(oop* from, int tid) {
     }
 
     // Otherwise, transfer from sparse to fine-grain.
-    short cards[SparsePRTEntry::CardsPerEntry];
+    CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
     if (G1HRRSUseSparseTable) {
-      bool res = _sparse_table.get_cards((short) from_hrs_ind, &cards[0]);
+      bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
       assert(res, "There should have been an entry");
     }
 
@@ -679,13 +679,13 @@ void OtherRegionsTable::add_reference(oop* from, int tid) {
     // Add in the cards from the sparse table.
     if (G1HRRSUseSparseTable) {
       for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
-        short c = cards[i];
+        CardIdx_t c = cards[i];
         if (c != SparsePRTEntry::NullEntry) {
           prt->add_card(c);
         }
       }
       // Now we can delete the sparse entry.
-      bool res = _sparse_table.delete_entry((short) from_hrs_ind);
+      bool res = _sparse_table.delete_entry(from_hrs_ind);
       assert(res, "It should have been there.");
     }
   }
@@ -1030,7 +1030,7 @@ bool OtherRegionsTable::contains_reference(oop* from) const {
 bool OtherRegionsTable::contains_reference_locked(oop* from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
   if (hr == NULL) return false;
-  size_t hr_ind = hr->hrs_index();
+  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
 
@@ -1045,8 +1045,9 @@ bool OtherRegionsTable::contains_reference_locked(oop* from) const {
     uintptr_t hr_bot_card_index =
       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
-    int card_index = from_card - hr_bot_card_index;
-    return _sparse_table.contains_card((short)hr_ind, card_index);
+    CardIdx_t card_index = from_card - hr_bot_card_index;
+    assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
+    return _sparse_table.contains_card(hr_ind, card_index);
   }
 
 
@@ -33,7 +33,7 @@ void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
   sprt_iter->init(this);
 }
 
-void SparsePRTEntry::init(short region_ind) {
+void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
 #if UNROLL_CARD_LOOPS
@@ -43,11 +43,12 @@ void SparsePRTEntry::init(short region_ind) {
   _cards[2] = NullEntry;
   _cards[3] = NullEntry;
 #else
-  for (int i = 0; i < CardsPerEntry; i++) _cards[i] = NullEntry;
+  for (int i = 0; i < CardsPerEntry; i++)
+    _cards[i] = NullEntry;
 #endif
 }
 
-bool SparsePRTEntry::contains_card(short card_index) const {
+bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
   if (_cards[0] == card_index) return true;
@@ -80,10 +81,10 @@ int SparsePRTEntry::num_valid_cards() const {
   return sum;
 }
 
-SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(short card_index) {
+SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
-  short c = _cards[0];
+  CardIdx_t c = _cards[0];
   if (c == card_index) return found;
   if (c == NullEntry) { _cards[0] = card_index; return added; }
   c = _cards[1];
@@ -97,16 +98,19 @@ SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(short card_index) {
   if (c == NullEntry) { _cards[3] = card_index; return added; }
 #else
   for (int i = 0; i < CardsPerEntry; i++) {
-    short c = _cards[i];
+    CardIdx_t c = _cards[i];
     if (c == card_index) return found;
-    if (c == NullEntry) { _cards[i] = card_index; return added; }
+    if (c == NullEntry) {
+      _cards[i] = card_index;
+      return added;
+    }
   }
 #endif
   // Otherwise, we're full.
   return overflow;
 }
 
-void SparsePRTEntry::copy_cards(short* cards) const {
+void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
   cards[0] = _cards[0];
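The loop above is the heart of the sparse table's fixed-capacity entry: each region entry holds CardsPerEntry card slots and reports overflow when full, so the caller can transfer the region to the fine-grain table. A simplified standalone sketch of that protocol (slot count and null value assumed to match the enum shown later in sparsePRT.hpp):

#include <cassert>

typedef int CardIdx_t;
enum AddCardResult { overflow, found, added };
const int CardsPerEntry = 4;
const CardIdx_t NullEntry = -1;

struct Entry {
  CardIdx_t cards[CardsPerEntry];

  AddCardResult add_card(CardIdx_t card_index) {
    for (int i = 0; i < CardsPerEntry; i++) {
      CardIdx_t c = cards[i];
      if (c == card_index) return found;  // card already recorded
      if (c == NullEntry) {               // take the first free slot
        cards[i] = card_index;
        return added;
      }
    }
    return overflow;  // caller must move this region to a coarser table
  }
};

int main() {
  Entry e = {{ NullEntry, NullEntry, NullEntry, NullEntry }};
  assert(e.add_card(7) == added);
  assert(e.add_card(7) == found);
  assert(e.add_card(1) == added && e.add_card(2) == added && e.add_card(3) == added);
  assert(e.add_card(9) == overflow);
  return 0;
}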
@@ -130,7 +134,7 @@ RSHashTable::RSHashTable(size_t capacity) :
   _capacity(capacity), _capacity_mask(capacity-1),
   _occupied_entries(0), _occupied_cards(0),
   _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
-  _buckets(NEW_C_HEAP_ARRAY(short, capacity)),
+  _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
   _next_deleted(NULL), _deleted(false),
   _free_list(NullEntry), _free_region(0)
 {
@@ -143,7 +147,7 @@ RSHashTable::~RSHashTable() {
     _entries = NULL;
   }
   if (_buckets != NULL) {
-    FREE_C_HEAP_ARRAY(short, _buckets);
+    FREE_C_HEAP_ARRAY(int, _buckets);
     _buckets = NULL;
   }
 }
@@ -153,14 +157,18 @@ void RSHashTable::clear() {
   _occupied_cards = 0;
   guarantee(_entries != NULL, "INV");
   guarantee(_buckets != NULL, "INV");
+
+  guarantee(_capacity <= ((size_t)1 << (sizeof(int)*BitsPerByte-1)) - 1,
+            "_capacity too large");
+
   // This will put -1 == NullEntry in the key field of all entries.
   memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
-  memset(_buckets, -1, _capacity * sizeof(short));
+  memset(_buckets, -1, _capacity * sizeof(int));
   _free_list = NullEntry;
   _free_region = 0;
 }
 
-bool RSHashTable::add_card(short region_ind, short card_index) {
+bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
   SparsePRTEntry* e = entry_for_region_ind_create(region_ind);
   assert(e != NULL && e->r_ind() == region_ind,
          "Postcondition of call above.");
@@ -175,9 +183,9 @@ bool RSHashTable::add_card(short region_ind, short card_index) {
   return res != SparsePRTEntry::overflow;
 }
 
-bool RSHashTable::get_cards(short region_ind, short* cards) {
-  short ind = (short) (region_ind & capacity_mask());
-  short cur_ind = _buckets[ind];
+bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
@@ -192,10 +200,10 @@ bool RSHashTable::get_cards(short region_ind, short* cards) {
   return true;
 }
 
-bool RSHashTable::delete_entry(short region_ind) {
-  short ind = (short) (region_ind & capacity_mask());
-  short* prev_loc = &_buckets[ind];
-  short cur_ind = *prev_loc;
+bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
+  int ind = (int) (region_ind & capacity_mask());
+  int* prev_loc = &_buckets[ind];
+  int cur_ind = *prev_loc;
   SparsePRTEntry* cur;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
@@ -212,10 +220,11 @@ bool RSHashTable::delete_entry(short region_ind) {
   return true;
 }
 
-SparsePRTEntry* RSHashTable::entry_for_region_ind(short region_ind) const {
+SparsePRTEntry*
+RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
   assert(occupied_entries() < capacity(), "Precondition");
-  short ind = (short) (region_ind & capacity_mask());
-  short cur_ind = _buckets[ind];
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
   // XXX
   // int k = 0;
@@ -242,15 +251,16 @@ SparsePRTEntry* RSHashTable::entry_for_region_ind(short region_ind) const {
   }
 }
 
-SparsePRTEntry* RSHashTable::entry_for_region_ind_create(short region_ind) {
+SparsePRTEntry*
+RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
   SparsePRTEntry* res = entry_for_region_ind(region_ind);
   if (res == NULL) {
-    short new_ind = alloc_entry();
+    int new_ind = alloc_entry();
     assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room.");
     res = entry(new_ind);
     res->init(region_ind);
     // Insert at front.
-    short ind = (short) (region_ind & capacity_mask());
+    int ind = (int) (region_ind & capacity_mask());
     res->set_next_index(_buckets[ind]);
     _buckets[ind] = new_ind;
     _occupied_entries++;
@@ -258,8 +268,8 @@ SparsePRTEntry* RSHashTable::entry_for_region_ind_create(short region_ind) {
   return res;
 }
 
-short RSHashTable::alloc_entry() {
-  short res;
+int RSHashTable::alloc_entry() {
+  int res;
   if (_free_list != NullEntry) {
     res = _free_list;
     _free_list = entry(res)->next_index();
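alloc_entry/free_entry recycle table slots through an intrusive free list threaded via each entry's next_index, falling back to the untouched tail (_free_region) of the array. A compact sketch of the scheme (container and names simplified; not commit code):

#include <cassert>
#include <vector>

struct Node { int next_index; };

struct MiniTable {
  std::vector<Node> entries;
  int free_list;    // head of recycled entries, -1 when empty
  int free_region;  // first never-used slot

  MiniTable(int cap) : entries(cap), free_list(-1), free_region(0) {}

  int alloc_entry() {
    int res;
    if (free_list != -1) {              // reuse a freed slot first
      res = free_list;
      free_list = entries[res].next_index;
    } else {                            // otherwise take a fresh slot
      res = free_region++;
    }
    return res;
  }

  void free_entry(int fi) {             // push onto the free list
    entries[fi].next_index = free_list;
    free_list = fi;
  }
};

int main() {
  MiniTable t(4);
  int a = t.alloc_entry();       // fresh slot 0
  t.alloc_entry();               // fresh slot 1
  t.free_entry(a);
  assert(t.alloc_entry() == a);  // recycled slot comes back first
  assert(t.alloc_entry() == 2);  // then the free region resumes
  return 0;
}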
@@ -273,13 +283,11 @@ short RSHashTable::alloc_entry() {
   }
 }
 
-
-void RSHashTable::free_entry(short fi) {
+void RSHashTable::free_entry(int fi) {
   entry(fi)->set_next_index(_free_list);
   _free_list = fi;
 }
 
-
 void RSHashTable::add_entry(SparsePRTEntry* e) {
   assert(e->num_valid_cards() > 0, "Precondition.");
   SparsePRTEntry* e2 = entry_for_region_ind_create(e->r_ind());
@@ -322,8 +330,8 @@ RSHashTable* RSHashTable::get_from_deleted_list() {
   return NULL;
 }
 
-short /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
-  short res;
+CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+  CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
     if (res != SparsePRTEntry::NullEntry) {
@@ -336,7 +344,7 @@ short /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(short ci) {
+size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   return
     _heap_bot_card_ind
     + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
@@ -345,7 +353,7 @@ size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(short ci) {
 
 bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
-  short ci;
+  CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::CardsPerEntry &&
       ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
        SparsePRTEntry::NullEntry)) {
@@ -379,16 +387,16 @@ bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   return false;
 }
 
-bool RSHashTable::contains_card(short region_index, short card_index) const {
+bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
   SparsePRTEntry* e = entry_for_region_ind(region_index);
   return (e != NULL && e->contains_card(card_index));
 }
 
 size_t RSHashTable::mem_size() const {
-  return sizeof(this) + capacity() * (sizeof(SparsePRTEntry) + sizeof(short));
+  return sizeof(this) +
+    capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
 }
 
 
 // ----------------------------------------------------------------------
 
 SparsePRT* SparsePRT::_head_expanded_list = NULL;
@@ -408,6 +416,7 @@ void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
   }
 }
 
+
 SparsePRT* SparsePRT::get_from_expanded_list() {
   SparsePRT* hd = _head_expanded_list;
   while (hd != NULL) {
@@ -452,6 +461,7 @@ SparsePRT::SparsePRT(HeapRegion* hr) :
   _next = _cur;
 }
 
+
 SparsePRT::~SparsePRT() {
   assert(_next != NULL && _cur != NULL, "Inv");
   if (_cur != _next) { delete _cur; }
@@ -465,7 +475,7 @@ size_t SparsePRT::mem_size() const {
   return sizeof(this) + _next->mem_size();
 }
 
-bool SparsePRT::add_card(short region_id, short card_index) {
+bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr(" Adding card %d from region %d to region %d sparse.",
                          card_index, region_id, _hr->hrs_index());
@@ -476,11 +486,11 @@ bool SparsePRT::add_card(short region_id, short card_index) {
   return _next->add_card(region_id, card_index);
 }
 
-bool SparsePRT::get_cards(short region_id, short* cards) {
+bool SparsePRT::get_cards(RegionIdx_t region_id, CardIdx_t* cards) {
   return _next->get_cards(region_id, cards);
 }
 
-bool SparsePRT::delete_entry(short region_id) {
+bool SparsePRT::delete_entry(RegionIdx_t region_id) {
   return _next->delete_entry(region_id);
 }
 
@@ -35,32 +35,32 @@
 
 class SparsePRTEntry: public CHeapObj {
 public:
 
   enum SomePublicConstants {
-    CardsPerEntry = (short)4,
-    NullEntry = (short)-1,
-    DeletedEntry = (short)-2
+    CardsPerEntry = 4,
+    NullEntry = -1
   };
 
 private:
-  short _region_ind;
-  short _next_index;
-  short _cards[CardsPerEntry];
+  RegionIdx_t _region_ind;
+  int         _next_index;
+  CardIdx_t   _cards[CardsPerEntry];
 
 public:
 
   // Set the region_ind to the given value, and delete all cards.
-  inline void init(short region_ind);
+  inline void init(RegionIdx_t region_ind);
 
-  short r_ind() const { return _region_ind; }
+  RegionIdx_t r_ind() const { return _region_ind; }
   bool valid_entry() const { return r_ind() >= 0; }
-  void set_r_ind(short rind) { _region_ind = rind; }
+  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }
 
-  short next_index() const { return _next_index; }
-  short* next_index_addr() { return &_next_index; }
-  void set_next_index(short ni) { _next_index = ni; }
+  int next_index() const { return _next_index; }
+  int* next_index_addr() { return &_next_index; }
+  void set_next_index(int ni) { _next_index = ni; }
 
   // Returns "true" iff the entry contains the given card index.
-  inline bool contains_card(short card_index) const;
+  inline bool contains_card(CardIdx_t card_index) const;
 
   // Returns the number of non-NULL card entries.
   inline int num_valid_cards() const;
@@ -73,14 +73,14 @@ public:
     found,
     added
   };
-  inline AddCardResult add_card(short card_index);
+  inline AddCardResult add_card(CardIdx_t card_index);
 
   // Copy the current entry's cards into "cards".
-  inline void copy_cards(short* cards) const;
+  inline void copy_cards(CardIdx_t* cards) const;
   // Copy the current entry's cards into the "_card" array of "e."
   inline void copy_cards(SparsePRTEntry* e) const;
 
-  inline short card(int i) const { return _cards[i]; }
+  inline CardIdx_t card(int i) const { return _cards[i]; }
 };
 
 
@@ -98,9 +98,9 @@ class RSHashTable : public CHeapObj {
   size_t _occupied_cards;
 
   SparsePRTEntry* _entries;
-  short* _buckets;
-  short  _free_region;
-  short  _free_list;
+  int* _buckets;
+  int  _free_region;
+  int  _free_list;
 
   static RSHashTable* _head_deleted_list;
   RSHashTable* _next_deleted;
@@ -113,20 +113,20 @@ class RSHashTable : public CHeapObj {
   // operations, and that the the table be less than completely full. If
   // an entry for "region_ind" is already in the table, finds it and
   // returns its address; otherwise returns "NULL."
-  SparsePRTEntry* entry_for_region_ind(short region_ind) const;
+  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;
 
   // Requires that the caller hold a lock preventing parallel modifying
   // operations, and that the the table be less than completely full. If
   // an entry for "region_ind" is already in the table, finds it and
   // returns its address; otherwise allocates, initializes, inserts and
   // returns a new entry for "region_ind".
-  SparsePRTEntry* entry_for_region_ind_create(short region_ind);
+  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);
 
   // Returns the index of the next free entry in "_entries".
-  short alloc_entry();
+  int alloc_entry();
   // Declares the entry "fi" to be free. (It must have already been
   // deleted from any bucket lists.
-  void free_entry(short fi);
+  void free_entry(int fi);
 
 public:
   RSHashTable(size_t capacity);
@@ -138,12 +138,12 @@ public:
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region. The caller must transfer these
   // entries to a larger-capacity representation.
-  bool add_card(short region_id, short card_index);
+  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 
-  bool get_cards(short region_id, short* cards);
-  bool delete_entry(short region_id);
+  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
+  bool delete_entry(RegionIdx_t region_id);
 
-  bool contains_card(short region_id, short card_index) const;
+  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
 
   void add_entry(SparsePRTEntry* e);
 
@@ -162,52 +162,50 @@ public:
 
   static void add_to_deleted_list(RSHashTable* rsht);
   static RSHashTable* get_from_deleted_list();
 
 
 };
 
-// ValueObj because will be embedded in HRRS iterator.
+// ValueObj because will be embedded in HRRS iterator.
 class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
-  short _tbl_ind;
-  short _bl_ind;
-  short _card_ind;
-  RSHashTable* _rsht;
-  size_t _heap_bot_card_ind;
-
-  enum SomePrivateConstants {
-    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-  };
-
-  // If the bucket list pointed to by _bl_ind contains a card, sets
-  // _bl_ind to the index of that entry, and returns the card.
-  // Otherwise, returns SparseEntry::NullEnty.
-  short find_first_card_in_list();
-  // Computes the proper card index for the card whose offset in the
-  // current region (as indicated by _bl_ind) is "ci".
-  // This is subject to errors when there is iteration concurrent with
-  // modification, but these errors should be benign.
-  size_t compute_card_ind(short ci);
-
-public:
-  RSHashTableIter(size_t heap_bot_card_ind) :
-    _tbl_ind(RSHashTable::NullEntry),
-    _bl_ind(RSHashTable::NullEntry),
-    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
-    _rsht(NULL),
-    _heap_bot_card_ind(heap_bot_card_ind)
-  {}
-
-  void init(RSHashTable* rsht) {
-    _rsht = rsht;
-    _tbl_ind = -1; // So that first increment gets to 0.
-    _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
-  }
-
-  bool has_next(size_t& card_index);
+  int _tbl_ind;        // [-1, 0.._rsht->_capacity)
+  int _bl_ind;         // [-1, 0.._rsht->_capacity)
+  short _card_ind;     // [0..CardsPerEntry)
+  RSHashTable* _rsht;
+  size_t _heap_bot_card_ind;
+
+  enum SomePrivateConstants {
+    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
+  };
+
+  // If the bucket list pointed to by _bl_ind contains a card, sets
+  // _bl_ind to the index of that entry, and returns the card.
+  // Otherwise, returns SparseEntry::NullEntry.
+  CardIdx_t find_first_card_in_list();
+
+  // Computes the proper card index for the card whose offset in the
+  // current region (as indicated by _bl_ind) is "ci".
+  // This is subject to errors when there is iteration concurrent with
+  // modification, but these errors should be benign.
+  size_t compute_card_ind(CardIdx_t ci);
+
+public:
+  RSHashTableIter(size_t heap_bot_card_ind) :
+    _tbl_ind(RSHashTable::NullEntry),
+    _bl_ind(RSHashTable::NullEntry),
+    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
+    _rsht(NULL),
+    _heap_bot_card_ind(heap_bot_card_ind)
+  {}
+
+  void init(RSHashTable* rsht) {
+    _rsht = rsht;
+    _tbl_ind = -1; // So that first increment gets to 0.
+    _bl_ind = RSHashTable::NullEntry;
+    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
+  }
+
+  bool has_next(size_t& card_index);
 };
 
 // Concurrent accesss to a SparsePRT must be serialized by some external
 // mutex.
 
|
||||
SparsePRT* next_expanded() { return _next_expanded; }
|
||||
void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
|
||||
|
||||
|
||||
static SparsePRT* _head_expanded_list;
|
||||
|
||||
public:
|
||||
@ -255,16 +252,16 @@ public:
|
||||
// Otherwise, returns "false" to indicate that the addition would
|
||||
// overflow the entry for the region. The caller must transfer these
|
||||
// entries to a larger-capacity representation.
|
||||
bool add_card(short region_id, short card_index);
|
||||
bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
|
||||
|
||||
// If the table hold an entry for "region_ind", Copies its
|
||||
// cards into "cards", which must be an array of length at least
|
||||
// "CardsPerEntry", and returns "true"; otherwise, returns "false".
|
||||
bool get_cards(short region_ind, short* cards);
|
||||
bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);
|
||||
|
||||
// If there is an entry for "region_ind", removes it and return "true";
|
||||
// otherwise returns "false."
|
||||
bool delete_entry(short region_ind);
|
||||
bool delete_entry(RegionIdx_t region_ind);
|
||||
|
||||
// Clear the table, and reinitialize to initial capacity.
|
||||
void clear();
|
||||
@@ -276,13 +273,12 @@ public:
   static void cleanup_all();
   RSHashTable* cur() const { return _cur; }
 
-
   void init_iterator(SparsePRTIter* sprt_iter);
 
   static void add_to_expanded_list(SparsePRT* sprt);
   static SparsePRT* get_from_expanded_list();
 
-  bool contains_card(short region_id, short card_index) const {
+  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
 
@@ -51,7 +51,6 @@ concurrentG1Refine.hpp globalDefinitions.hpp
 concurrentG1Refine.hpp allocation.hpp
 concurrentG1Refine.hpp thread.hpp
 
-
 concurrentG1RefineThread.cpp concurrentG1Refine.hpp
 concurrentG1RefineThread.cpp concurrentG1RefineThread.hpp
 concurrentG1RefineThread.cpp g1CollectedHeap.inline.hpp
@@ -334,6 +333,7 @@ sparsePRT.cpp space.inline.hpp
 sparsePRT.hpp allocation.hpp
 sparsePRT.hpp cardTableModRefBS.hpp
 sparsePRT.hpp globalDefinitions.hpp
+sparsePRT.hpp g1CollectedHeap.inline.hpp
 sparsePRT.hpp heapRegion.hpp
 sparsePRT.hpp mutex.hpp
 
@@ -177,6 +177,7 @@ private:
   // are double-word aligned in 32-bit VMs, but not in 64-bit VMs, so the 32-bit
   // granularity is 2, 64-bit is 1.
   static inline size_t obj_granularity() { return size_t(MinObjAlignment); }
+  static inline int obj_granularity_shift() { return LogMinObjAlignment; }
 
   HeapWord* _region_start;
   size_t _region_size;
@@ -299,13 +300,13 @@ inline bool ParMarkBitMap::is_unmarked(oop obj) const
 inline size_t
 ParMarkBitMap::bits_to_words(idx_t bits)
 {
-  return bits * obj_granularity();
+  return bits << obj_granularity_shift();
 }
 
 inline ParMarkBitMap::idx_t
 ParMarkBitMap::words_to_bits(size_t words)
 {
-  return words / obj_granularity();
+  return words >> obj_granularity_shift();
 }
 
 inline size_t ParMarkBitMap::obj_size(idx_t beg_bit, idx_t end_bit) const
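This hunk swaps a multiply and a divide by the power-of-two object granularity for shifts by its log2, which is cheaper on most targets. A quick standalone check of the equivalence (granularity value assumed per the 32-bit comment above; not commit code):

#include <cassert>
#include <cstddef>

int main() {
  const size_t obj_granularity = 2;     // e.g. 32-bit VM per the comment above
  const int obj_granularity_shift = 1;  // log2(2)

  for (size_t bits = 0; bits < 1000; bits++) {
    assert(bits * obj_granularity == bits << obj_granularity_shift);
  }
  for (size_t words = 0; words < 1000; words++) {
    assert(words / obj_granularity == words >> obj_granularity_shift);
  }
  return 0;
}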
@@ -387,7 +387,7 @@ c1_ValueMap.hpp c1_ValueSet.hpp
 c1_ValueSet.cpp c1_ValueSet.hpp
 
 c1_ValueSet.hpp allocation.hpp
-c1_ValueSet.hpp bitMap.hpp
+c1_ValueSet.hpp bitMap.inline.hpp
 c1_ValueSet.hpp c1_Instruction.hpp
 
 c1_ValueStack.cpp c1_IR.hpp
@@ -242,6 +242,31 @@ class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier {
 #endif
 };
 
+// A SkipGCALot object is used to elide the usual effect of gc-a-lot
+// over a section of execution by a thread. Currently, it's used only to
+// prevent re-entrant calls to GC.
+class SkipGCALot : public StackObj {
+  private:
+   bool _saved;
+   Thread* _t;
+
+  public:
+#ifdef ASSERT
+    SkipGCALot(Thread* t) : _t(t) {
+      _saved = _t->skip_gcalot();
+      _t->set_skip_gcalot(true);
+    }
+
+    ~SkipGCALot() {
+      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
+      _t->set_skip_gcalot(_saved);
+    }
+#else
+    SkipGCALot(Thread* t) { }
+    ~SkipGCALot() { }
+#endif
+};
+
 // JRT_LEAF currently can be called from either _thread_in_Java or
 // _thread_in_native mode. In _thread_in_native, it is ok
 // for another thread to trigger GC. The rest of the JRT_LEAF
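SkipGCALot follows the stack-object save/restore idiom: the constructor records the thread's current flag and sets it, the destructor restores it, so nested scopes compose correctly. A minimal stand-in (FakeThread/ScopedSkip are hypothetical names, not HotSpot types) demonstrating the invariant:

#include <cassert>

struct FakeThread {
  bool skip_gcalot;
};

class ScopedSkip {
  FakeThread* _t;
  bool _saved;
public:
  ScopedSkip(FakeThread* t) : _t(t), _saved(t->skip_gcalot) {
    _t->skip_gcalot = true;   // elide gc-a-lot inside this scope
  }
  ~ScopedSkip() {
    assert(_t->skip_gcalot);  // save-restore protocol invariant
    _t->skip_gcalot = _saved; // restore on scope exit, even when nested
  }
};

int main() {
  FakeThread t = { false };
  {
    ScopedSkip outer(&t);
    assert(t.skip_gcalot);
    {
      ScopedSkip inner(&t);   // nesting is safe: each level restores its saved value
      assert(t.skip_gcalot);
    }
    assert(t.skip_gcalot);    // still set from the outer scope
  }
  assert(!t.skip_gcalot);     // fully restored
  return 0;
}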
@@ -66,11 +66,14 @@ void InterfaceSupport::trace(const char* result_type, const char* header) {
 
 void InterfaceSupport::gc_alot() {
   Thread *thread = Thread::current();
-  if (thread->is_VM_thread()) return; // Avoid concurrent calls
+  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
   // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
   JavaThread *current_thread = (JavaThread *)thread;
   if (current_thread->active_handles() == NULL) return;
 
+  // Short-circuit any possible re-entrant gc-a-lot attempt
+  if (thread->skip_gcalot()) return;
+
   if (is_init_completed()) {
 
     if (++_fullgc_alot_invocation < FullGCALotStart) {
@@ -127,6 +127,7 @@ Thread::Thread() {
   debug_only(_owned_locks = NULL;)
   debug_only(_allow_allocation_count = 0;)
   NOT_PRODUCT(_allow_safepoint_count = 0;)
+  NOT_PRODUCT(_skip_gcalot = false;)
   CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
   _jvmti_env_iteration_count = 0;
   _vm_operation_started_count = 0;
@@ -784,7 +785,6 @@ void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
       // We could enter a safepoint here and thus have a gc
       InterfaceSupport::check_gc_alot();
     }
-
 #endif
   }
 #endif
@@ -191,6 +191,9 @@ class Thread: public ThreadShadow {
   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
   debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
 
+  // Used by SkipGCALot class.
+  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
+
   // Record when GC is locked out via the GC_locker mechanism
   CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
 
@@ -308,6 +311,11 @@ class Thread: public ThreadShadow {
   bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
 #endif // CHECK_UNHANDLED_OOPS
 
+#ifndef PRODUCT
+  bool skip_gcalot()           { return _skip_gcalot; }
+  void set_skip_gcalot(bool v) { _skip_gcalot = v; }
+#endif
+
  public:
   // Installs a pending exception to be inserted later
   static void send_async_exception(oop thread_oop, oop java_throwable);
@@ -531,6 +531,7 @@ void VMThread::execute(VM_Operation* op) {
   Thread* t = Thread::current();
 
   if (!t->is_VM_thread()) {
+    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
     // JavaThread or WatcherThread
     t->check_for_valid_safepoint_state(true);
 
@@ -41,19 +41,6 @@ BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
   resize(size_in_bits, in_resource_area);
 }
 
-
-void BitMap::verify_index(idx_t index) const {
-  assert(index < _size, "BitMap index out of bounds");
-}
-
-void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
-#ifdef ASSERT
-  assert(beg_index <= end_index, "BitMap range error");
-  // Note that [0,0) and [size,size) are both valid ranges.
-  if (end_index != _size) verify_index(end_index);
-#endif
-}
-
 void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
   assert(size_in_bits >= 0, "just checking");
   idx_t old_size_in_words = size_in_words();
@@ -93,10 +93,12 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
   // The index of the first full word in a range.
   idx_t word_index_round_up(idx_t bit) const;
 
-  // Verification, statistics.
-  void verify_index(idx_t index) const;
-  void verify_range(idx_t beg_index, idx_t end_index) const;
+  // Verification.
+  inline void verify_index(idx_t index) const NOT_DEBUG_RETURN;
+  inline void verify_range(idx_t beg_index, idx_t end_index) const
+    NOT_DEBUG_RETURN;
 
+  // Statistics.
   static idx_t* _pop_count_table;
   static void init_pop_count_table();
   static idx_t num_set_bits(bm_word_t w);
@@ -287,7 +289,6 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
 #endif
 };
 
-
 // Convenience class wrapping BitMap which provides multiple bits per slot.
 class BitMap2D VALUE_OBJ_CLASS_SPEC {
  public:
@@ -22,6 +22,17 @@
  *
  */
 
+#ifdef ASSERT
+inline void BitMap::verify_index(idx_t index) const {
+  assert(index < _size, "BitMap index out of bounds");
+}
+
+inline void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
+  assert(beg_index <= end_index, "BitMap range error");
+  // Note that [0,0) and [size,size) are both valid ranges.
+  if (end_index != _size) verify_index(end_index);
+}
+#endif // #ifdef ASSERT
+
 inline void BitMap::set_bit(idx_t bit) {
   verify_index(bit);
@@ -106,11 +106,13 @@
 #ifdef ASSERT
 #define DEBUG_ONLY(code) code
 #define NOT_DEBUG(code)
+#define NOT_DEBUG_RETURN  /*next token must be ;*/
 // Historical.
 #define debug_only(code) code
 #else // ASSERT
 #define DEBUG_ONLY(code)
 #define NOT_DEBUG(code) code
+#define NOT_DEBUG_RETURN  {}
 #define debug_only(code)
 #endif // ASSERT
 
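NOT_DEBUG_RETURN lets a verification method be declared once: in debug builds it expands to nothing, so the trailing ";" ends a plain declaration whose body lives in bitMap.inline.hpp, while in product builds it expands to "{}", turning the declaration itself into an empty inline body. A simplified sketch of the pattern outside HotSpot (guarding on ASSERT as above; names hypothetical):

#include <cassert>

#ifdef ASSERT
#define NOT_DEBUG_RETURN  /*next token must be ;*/
#else
#define NOT_DEBUG_RETURN  {}
#endif

struct Map {
  int size;
  // Debug build: a declaration, defined below. Product build: an empty inline no-op.
  void verify_index(int index) const NOT_DEBUG_RETURN;
};

#ifdef ASSERT
// The out-of-line body only exists when the declaration ended with ';'.
void Map::verify_index(int index) const {
  assert(index < size && "index out of bounds");
}
#endif

int main() {
  Map m = { 4 };
  m.verify_index(3);  // checked in debug builds, a no-op in product builds
  return 0;
}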