8146399: Refactor the BlockOffsetTable classes
Reviewed-by: mgerdin, jwilhelm, tschatzl
parent a476bf01f4
commit 98193d202b
@@ -277,7 +277,7 @@ HeapRegion* OldGCAllocRegion::release() {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top();
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetTable::N_bytes);

size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
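For illustration only (not part of the patch): a minimal sketch of the card-alignment arithmetic used in the hunk above. The card size N_BYTES is an assumption standing in for G1BlockOffsetTable::N_bytes, and align_up models align_ptr_up with plain integer arithmetic.

#include <cstddef>
#include <cstdint>

// Assumed card size in bytes; stands in for G1BlockOffsetTable::N_bytes.
static const size_t N_BYTES = 512;
static const size_t HEAP_WORD_SIZE = sizeof(void*);

// Models align_ptr_up: round p up to the next multiple of alignment (a power of two).
static uintptr_t align_up(uintptr_t p, size_t alignment) {
  return (p + alignment - 1) & ~(uintptr_t)(alignment - 1);
}

// Number of heap words of filler needed to move 'top' to the next card
// boundary; 0 when 'top' is already card aligned.
static size_t words_to_next_card_boundary(uintptr_t top) {
  uintptr_t aligned_top = align_up(top, N_BYTES);
  return (aligned_top - top) / HEAP_WORD_SIZE;
}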
@@ -35,35 +35,29 @@

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
// G1BlockOffsetTable
//////////////////////////////////////////////////////////////////////

G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(), _end(NULL), _listener(), _offset_array(NULL) {

_reserved = heap;
_end = NULL;
G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(heap), _offset_array(NULL) {

MemRegion bot_reserved = storage->reserved();

_offset_array = (u_char*)bot_reserved.start();
_end = _reserved.end();

storage->set_mapping_changed_listener(&_listener);

log_trace(gc, bot)("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
log_trace(gc, bot)("G1BlockOffsetTable::G1BlockOffsetTable: ");
log_trace(gc, bot)(" rs.base(): " PTR_FORMAT " rs.size(): " SIZE_FORMAT " rs end(): " PTR_FORMAT,
p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
}

bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
bool G1BlockOffsetTable::is_card_boundary(HeapWord* p) const {
assert(p >= _reserved.start(), "just checking");
size_t delta = pointer_delta(p, _reserved.start());
return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}

#ifdef ASSERT
void G1BlockOffsetSharedArray::check_index(size_t index, const char* msg) const {
void G1BlockOffsetTable::check_index(size_t index, const char* msg) const {
assert((index) < (_reserved.word_size() >> LogN_words),
"%s - index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
msg, (index), (_reserved.word_size() >> LogN_words));
@@ -77,25 +71,19 @@ void G1BlockOffsetSharedArray::check_index(size_t index, const char* msg) const
#endif // ASSERT

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
// G1BlockOffsetTablePart
//////////////////////////////////////////////////////////////////////

G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
MemRegion mr) :
G1BlockOffsetTable(mr.start(), mr.end()),
_unallocated_block(_bottom),
_array(array), _gsp(NULL) {
assert(_bottom <= _end, "arguments out of order");
}

void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
_gsp = sp;
}
G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp) :
_bot(array),
_space(gsp),
_next_offset_threshold(NULL),
_next_offset_index(0)
{ }

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
void G1BlockOffsetTablePart:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {

if (start >= end) {
// The start address is equal to the end address (or to
@@ -137,23 +125,22 @@ G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord*
// Move back N (e.g., 8) entries and repeat with the
// value of the new entry
//
size_t start_card = _array->index_for(start);
size_t end_card = _array->index_for(end-1);
assert(start ==_array->address_for_index(start_card), "Precondition");
assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
size_t start_card = _bot->index_for(start);
size_t end_card = _bot->index_for(end-1);
assert(start ==_bot->address_for_index(start_card), "Precondition");
assert(end ==_bot->address_for_index(end_card)+N_words, "Precondition");
set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}

// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
void G1BlockOffsetTablePart::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
if (start_card > end_card) {
return;
}
assert(start_card > _array->index_for(_bottom), "Cannot be first card");
assert(_array->offset_array(start_card-1) <= N_words,
assert(start_card > _bot->index_for(_space->bottom()), "Cannot be first card");
assert(_bot->offset_array(start_card-1) <= N_words,
"Offset card has an unexpected value");
size_t start_card_for_region = start_card;
u_char offset = max_jubyte;
@@ -164,11 +151,11 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
offset = N_words + i;
if (reach >= end_card) {
_array->set_offset_array(start_card_for_region, end_card, offset);
_bot->set_offset_array(start_card_for_region, end_card, offset);
start_card_for_region = reach + 1;
break;
}
_array->set_offset_array(start_card_for_region, reach, offset);
_bot->set_offset_array(start_card_for_region, reach, offset);
start_card_for_region = reach + 1;
}
assert(start_card_for_region > end_card, "Sanity check");
@@ -178,79 +165,44 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
void G1BlockOffsetTablePart::check_all_cards(size_t start_card, size_t end_card) const {

if (end_card < start_card) {
return;
}
guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
guarantee(_bot->offset_array(start_card) == N_words, "Wrong value in second card");
for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
u_char entry = _array->offset_array(c);
u_char entry = _bot->offset_array(c);
if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
guarantee(entry > N_words,
"Should be in logarithmic region - "
"entry: %u, "
"_array->offset_array(c): %u, "
"N_words: %u",
(uint)entry, (uint)_array->offset_array(c), (uint)N_words);
(uint)entry, (uint)_bot->offset_array(c), (uint)N_words);
}
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry,
guarantee(_bot->offset_array(landing_card) <= entry,
"Monotonicity - landing_card offset: %u, "
"entry: %u",
(uint)_array->offset_array(landing_card), (uint)entry);
(uint)_bot->offset_array(landing_card), (uint)entry);
} else {
guarantee(landing_card == start_card - 1, "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= N_words,
guarantee(_bot->offset_array(landing_card) <= N_words,
"landing card offset: %u, "
"N_words: %u",
(uint)_array->offset_array(landing_card), (uint)N_words);
(uint)_bot->offset_array(landing_card), (uint)N_words);
}
}
}

HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
// Must read this exactly once because it can be modified by parallel
// allocation.
HeapWord* ub = _unallocated_block;
if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
assert(ub < _end, "tautology (see above)");
return ub;
}
// Otherwise, find the block start using the table.
HeapWord* q = block_at_or_preceding(addr, false, 0);
return forward_to_block_containing_addr(q, addr);
}

// This duplicates a little code from the above: unavoidable.
HeapWord*
G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
// Must read this exactly once because it can be modified by parallel
// allocation.
HeapWord* ub = _unallocated_block;
if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
assert(ub < _end, "tautology (see above)");
return ub;
}
// Otherwise, find the block start using the table.
HeapWord* q = block_at_or_preceding(addr, false, 0);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
}

HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
HeapWord* n,
const void* addr) {
HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_slow(HeapWord* q,
HeapWord* n,
const void* addr) {
// We're not in the normal case. We need to handle an important subcase
// here: LAB allocation. An allocation previously recorded in the
// offset table was actually a lab allocation, and was divided into
@@ -260,17 +212,17 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
// If the fist object's end q is at the card boundary. Start refining
// with the corresponding card (the value of the entry will be basically
// set to 0). If the object crosses the boundary -- start from the next card.
size_t n_index = _array->index_for(n);
size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
size_t n_index = _bot->index_for(n);
size_t next_index = _bot->index_for(n) + !_bot->is_card_boundary(n);
// Calculate a consistent next boundary. If "n" is not at the boundary
// already, step to the boundary.
HeapWord* next_boundary = _array->address_for_index(n_index) +
HeapWord* next_boundary = _bot->address_for_index(n_index) +
(n_index == next_index ? 0 : N_words);
assert(next_boundary <= _array->_end,
assert(next_boundary <= _bot->_reserved.end(),
"next_boundary is beyond the end of the covered region "
" next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
p2i(next_boundary), p2i(_array->_end));
if (addr >= gsp()->top()) return gsp()->top();
p2i(next_boundary), p2i(_bot->_reserved.end()));
if (addr >= _space->top()) return _space->top();
while (next_boundary < addr) {
while (n <= next_boundary) {
q = n;
@@ -280,18 +232,11 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
// [q, n) is the block that crosses the boundary.
alloc_block_work2(&next_boundary, &next_index, q, n);
alloc_block_work(&next_boundary, &next_index, q, n);
}
return forward_to_block_containing_addr_const(q, n, addr);
}

// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
HeapWord* new_end = _bottom + new_word_size;
_end = new_end;  // update _end
}

//
//              threshold_
//              |   _index_
@@ -302,8 +247,8 @@ void G1BlockOffsetArray::resize(size_t new_word_size) {
//      (   ^    ]
//        block-start
//
void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end) {
void G1BlockOffsetTablePart::alloc_block_work(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end) {
// For efficiency, do copy-in/copy-out.
HeapWord* threshold = *threshold_;
size_t index = *index_;
@@ -318,7 +263,7 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
"reference must be into the heap");
assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
"limit must be within the heap");
assert(threshold == _array->_reserved.start() + index*N_words,
assert(threshold == _bot->_reserved.start() + index*N_words,
"index must agree with threshold");

DEBUG_ONLY(size_t orig_index = index;)
@@ -326,26 +271,26 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
// Mark the card that holds the offset into the block. Note
// that _next_offset_index and _next_offset_threshold are not
// updated until the end of this method.
_array->set_offset_array(index, threshold, blk_start);
_bot->set_offset_array(index, threshold, blk_start);

// We need to now mark the subsequent cards that this blk spans.

// Index of card on which blk ends.
size_t end_index = _array->index_for(blk_end - 1);
size_t end_index = _bot->index_for(blk_end - 1);

// Are there more cards left to be updated?
if (index + 1 <= end_index) {
HeapWord* rem_st = _array->address_for_index(index + 1);
HeapWord* rem_st = _bot->address_for_index(index + 1);
// Calculate rem_end this way because end_index
// may be the last valid index in the covered region.
HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
HeapWord* rem_end = _bot->address_for_index(end_index) + N_words;
set_remainder_to_point_to_start(rem_st, rem_end);
}

index = end_index + 1;
// Calculate threshold_ this way because end_index
// may be the last valid index in the covered region.
threshold = _array->address_for_index(end_index) + N_words;
threshold = _bot->address_for_index(end_index) + N_words;
assert(threshold >= blk_end, "Incorrect offset threshold");

// index_ and threshold_ updated here.
@@ -355,49 +300,49 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
#ifdef ASSERT
// The offset can be 0 if the block starts on a boundary. That
// is checked by an assertion above.
size_t start_index = _array->index_for(blk_start);
HeapWord* boundary = _array->address_for_index(start_index);
assert((_array->offset_array(orig_index) == 0 && blk_start == boundary) ||
(_array->offset_array(orig_index) > 0 && _array->offset_array(orig_index) <= N_words),
size_t start_index = _bot->index_for(blk_start);
HeapWord* boundary = _bot->address_for_index(start_index);
assert((_bot->offset_array(orig_index) == 0 && blk_start == boundary) ||
(_bot->offset_array(orig_index) > 0 && _bot->offset_array(orig_index) <= N_words),
"offset array should have been set - "
"orig_index offset: %u, "
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
(uint)_array->offset_array(orig_index),
(uint)_bot->offset_array(orig_index),
p2i(blk_start), p2i(boundary));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
assert(_bot->offset_array(j) > 0 &&
_bot->offset_array(j) <=
(u_char) (N_words+BlockOffsetArray::N_powers-1),
"offset array should have been set - "
"%u not > 0 OR %u not <= %u",
(uint) _array->offset_array(j),
(uint) _array->offset_array(j),
(uint) _bot->offset_array(j),
(uint) _bot->offset_array(j),
(uint) (N_words+BlockOffsetArray::N_powers-1));
}
#endif
}

void G1BlockOffsetArray::verify() const {
assert(gsp()->bottom() < gsp()->top(), "Only non-empty regions should be verified.");
size_t start_card = _array->index_for(gsp()->bottom());
size_t end_card = _array->index_for(gsp()->top() - 1);
void G1BlockOffsetTablePart::verify() const {
assert(_space->bottom() < _space->top(), "Only non-empty regions should be verified.");
size_t start_card = _bot->index_for(_space->bottom());
size_t end_card = _bot->index_for(_space->top() - 1);

for (size_t current_card = start_card; current_card < end_card; current_card++) {
u_char entry = _array->offset_array(current_card);
u_char entry = _bot->offset_array(current_card);
if (entry < N_words) {
// The entry should point to an object before the current card. Verify that
// it is possible to walk from that object in to the current card by just
// iterating over the objects following it.
HeapWord* card_address = _array->address_for_index(current_card);
HeapWord* card_address = _bot->address_for_index(current_card);
HeapWord* obj_end = card_address - entry;
while (obj_end < card_address) {
HeapWord* obj = obj_end;
size_t obj_size = block_size(obj);
obj_end = obj + obj_size;
guarantee(obj_end > obj && obj_end <= gsp()->top(),
guarantee(obj_end > obj && obj_end <= _space->top(),
"Invalid object end. obj: " PTR_FORMAT " obj_size: " SIZE_FORMAT " obj_end: " PTR_FORMAT " top: " PTR_FORMAT,
p2i(obj), obj_size, p2i(obj_end), p2i(gsp()->top()));
p2i(obj), obj_size, p2i(obj_end), p2i(_space->top()));
}
} else {
// Because we refine the BOT based on which cards are dirty there is not much we can verify here.
@@ -411,103 +356,66 @@ void G1BlockOffsetArray::verify() const {
"Going backwards beyond the start_card. start_card: " SIZE_FORMAT " current_card: " SIZE_FORMAT " backskip: " SIZE_FORMAT,
start_card, current_card, backskip);

HeapWord* backskip_address = _array->address_for_index(current_card - backskip);
guarantee(backskip_address >= gsp()->bottom(),
HeapWord* backskip_address = _bot->address_for_index(current_card - backskip);
guarantee(backskip_address >= _space->bottom(),
"Going backwards beyond bottom of the region: bottom: " PTR_FORMAT ", backskip_address: " PTR_FORMAT,
p2i(gsp()->bottom()), p2i(backskip_address));
p2i(_space->bottom()), p2i(backskip_address));
}
}
}

#ifndef PRODUCT
void
G1BlockOffsetArray::print_on(outputStream* out) {
size_t from_index = _array->index_for(_bottom);
size_t to_index = _array->index_for(_end);
G1BlockOffsetTablePart::print_on(outputStream* out) {
size_t from_index = _bot->index_for(_space->bottom());
size_t to_index = _bot->index_for(_space->end());
out->print_cr(">> BOT for area [" PTR_FORMAT "," PTR_FORMAT ") "
"cards [" SIZE_FORMAT "," SIZE_FORMAT ")",
p2i(_bottom), p2i(_end), from_index, to_index);
p2i(_space->bottom()), p2i(_space->end()), from_index, to_index);
for (size_t i = from_index; i < to_index; ++i) {
out->print_cr("  entry " SIZE_FORMAT_W(8) " | " PTR_FORMAT " : %3u",
i, p2i(_array->address_for_index(i)),
(uint) _array->offset_array(i));
i, p2i(_bot->address_for_index(i)),
(uint) _bot->offset_array(i));
}
}
#endif // !PRODUCT

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord*
G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
return forward_to_block_containing_addr(q, addr);
}

HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
}

G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
MemRegion mr) :
G1BlockOffsetArray(array, mr)
{
_next_offset_threshold = NULL;
_next_offset_index = 0;
}

HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for_raw(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index_raw(_next_offset_index);
return _next_offset_threshold;
}

void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
size_t bottom_index = _array->index_for_raw(_bottom);
assert(_array->address_for_index_raw(bottom_index) == _bottom,
"Precondition of call");
_array->set_offset_array_raw(bottom_index, 0);
}

HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index(_next_offset_index);
return _next_offset_threshold;
}

void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top, size_t fill_size) {
// The first BOT entry should have offset 0.
reset_bot();
alloc_block(_bottom, obj_top);
if (fill_size > 0) {
alloc_block(obj_top, fill_size);
}
}

#ifndef PRODUCT
void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
G1BlockOffsetArray::print_on(out);
out->print_cr("  next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
out->print_cr("  next offset index:     " SIZE_FORMAT, _next_offset_index);
}
#endif // !PRODUCT

HeapWord* G1BlockOffsetTablePart::initialize_threshold_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
_next_offset_index = _bot->index_for_raw(_space->bottom());
_next_offset_index++;
_next_offset_threshold =
_bot->address_for_index_raw(_next_offset_index);
return _next_offset_threshold;
}

void G1BlockOffsetTablePart::zero_bottom_entry_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
size_t bottom_index = _bot->index_for_raw(_space->bottom());
assert(_bot->address_for_index_raw(bottom_index) == _space->bottom(),
"Precondition of call");
_bot->set_offset_array_raw(bottom_index, 0);
}

HeapWord* G1BlockOffsetTablePart::initialize_threshold() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
_next_offset_index = _bot->index_for(_space->bottom());
_next_offset_index++;
_next_offset_threshold =
_bot->address_for_index(_next_offset_index);
return _next_offset_threshold;
}

void G1BlockOffsetTablePart::set_for_starts_humongous(HeapWord* obj_top, size_t fill_size) {
// The first BOT entry should have offset 0.
reset_bot();
alloc_block(_space->bottom(), obj_top);
if (fill_size > 0) {
alloc_block(obj_top, fill_size);
}
}
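A minimal, self-contained sketch of the update path implemented above (alloc_block_work / set_remainder_to_point_to_start), under simplifying assumptions that are not part of the patch: word-indexed addresses, 64-word cards, one byte per card, and offsets simply capped at N_WORDS instead of the logarithmic back-skip values that the real set_remainder_to_point_to_start_incl writes.

#include <cstdint>
#include <vector>

// Toy block offset table: the covered region is split into cards of N_WORDS
// words, and entry[c] records how many words before card c's boundary the
// block covering that boundary starts.
struct ToyBOT {
  static const size_t LOG_N_WORDS = 6;            // 64-word (512-byte) cards assumed
  static const size_t N_WORDS = 1 << LOG_N_WORDS;

  std::vector<uint8_t> entry;                     // one byte per card

  explicit ToyBOT(size_t covered_words) : entry(covered_words >> LOG_N_WORDS, 0) {}

  static size_t index_for(size_t word) { return word >> LOG_N_WORDS; }

  // Record a non-empty block [blk_start, blk_end): every card whose boundary
  // falls inside the block gets the distance back to blk_start. The real code
  // writes N_words + i "back-skip" codes for distant cards instead of capping.
  void alloc_block(size_t blk_start, size_t blk_end) {
    size_t first = (blk_start + N_WORDS - 1) >> LOG_N_WORDS; // first boundary >= blk_start
    size_t last = index_for(blk_end - 1);
    for (size_t c = first; c <= last && c < entry.size(); c++) {
      size_t back = c * N_WORDS - blk_start;
      entry[c] = back < N_WORDS ? (uint8_t)back : (uint8_t)N_WORDS;
    }
  }
};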
@@ -30,119 +30,25 @@
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
//    G1BlockOffsetTable (abstract)
//    -- G1BlockOffsetArray                (uses G1BlockOffsetSharedArray)
//       -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below. Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.

// Forward declarations
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
protected:
// These members describe the region covered by the table.

// The space this table is covering.
HeapWord* _bottom;    // == reserved.start
HeapWord* _end;       // End of currently allocated region.

public:
// Initialize the table to cover the given space.
// The contents of the initial table are undefined.
G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
_bottom(bottom), _end(end)
{
assert(_bottom <= _end, "arguments out of order");
}

// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
virtual void resize(size_t new_word_size) = 0;

virtual void set_bottom(HeapWord* new_bottom) {
assert(new_bottom <= _end,
"new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
p2i(new_bottom), p2i(_end));
_bottom = new_bottom;
resize(pointer_delta(_end, _bottom));
}

// Requires "addr" to be contained by a block, and returns the address of
// the start of that block. (May have side effects, namely updating of
// shared array entries that "point" too far backwards. This can occur,
// for example, when LAB allocation is used in a space covered by the
// table.)
virtual HeapWord* block_start_unsafe(const void* addr) = 0;
// Same as above, but does not have any of the possible side effects
// discussed above.
virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block. (May have side effects,
// namely updating of shared array entries that "point" too far
// backwards. This can occur, for example, when lab allocation is used
// in a space covered by the table.)
inline HeapWord* block_start(const void* addr);
// Same as above, but does not have any of the possible side effects
// discussed above.
inline HeapWord* block_start_const(const void* addr) const;
};

class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
// retrieve it here since this would cause firing of several asserts. The code
// executed after commit of a region already needs to do some re-initialization of
// the HeapRegion, so we combine that.
}
};
class G1BlockOffsetTable;
class G1ContiguousSpace;

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place,
// such as, for example, in G1 or in the train generation.)
// Each G1BlockOffsetTablePart is owned by a G1ContiguousSpace.

// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
friend class G1BlockOffsetArray;
friend class G1BlockOffsetArrayContigSpace;
class G1BlockOffsetTable: public CHeapObj<mtGC> {
friend class G1BlockOffsetTablePart;
friend class VMStructs;

private:
G1BlockOffsetSharedArrayMappingChangedListener _listener;
// The reserved region covered by the shared array.
// The reserved region covered by the table.
MemRegion _reserved;

// End of the current committed region.
HeapWord* _end;

// Array for keeping offsets for retrieving object start fast given an
// address.
u_char* _offset_array;          // byte array keeping backwards offsets
@@ -192,13 +98,9 @@ public:
N_words = 1 << LogN_words
};

// Initialize the table to cover from "base" to (at least)
// "base + init_word_size". In the future, the table may be expanded
// (see "resize" below) up to the size of "_reserved" (which must be at
// least "init_word_size".) The contents of the initial table are
// undefined; it is the responsibility of the constituent
// G1BlockOffsetTable(s) to initialize cards.
G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
// Initialize the Block Offset Table to cover the memory region passed
// in the heap parameter.
G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);

// Return the appropriate index into "_offset_array" for "p".
inline size_t index_for(const void* p) const;
@@ -213,29 +115,24 @@ public:
}
};

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
friend class G1BlockOffsetSharedArray;
friend class G1BlockOffsetArrayContigSpace;
class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
friend class G1BlockOffsetTable;
friend class VMStructs;
private:
enum SomePrivateConstants {
N_words = G1BlockOffsetSharedArray::N_words,
LogN = G1BlockOffsetSharedArray::LogN
N_words = G1BlockOffsetTable::N_words,
LogN = G1BlockOffsetTable::LogN
};

// This is the array, which can be shared by several BlockOffsetArray's
// servicing different
G1BlockOffsetSharedArray* _array;
// allocation boundary at which offset array must be updated
HeapWord* _next_offset_threshold;
size_t _next_offset_index;      // index corresponding to that boundary

// This is the global BlockOffsetTable.
G1BlockOffsetTable* _bot;

// The space that owns this subregion.
G1OffsetTableContigSpace* _gsp;

// The portion [_unallocated_block, _sp.end()) of the space that
// is a single block known not to contain any objects.
// NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
HeapWord* _unallocated_block;
G1ContiguousSpace* _space;

// Sets the entries
// corresponding to the cards starting at "start" and ending at "end"
@@ -246,9 +143,12 @@ private:
// that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end);

protected:

G1OffsetTableContigSpace* gsp() const { return _gsp; }
// Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();

inline size_t block_size(const HeapWord* p) const;

@@ -263,9 +163,8 @@ protected:
// next block (or the end of the space.) Return the address of the
// beginning of the block that contains "addr". Does so without side
// effects (see, e.g., spec of block_start.)
inline HeapWord*
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const;
inline HeapWord* forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const;

// "q" is a block boundary that is <= "addr"; return the address of the
// beginning of the block that contains "addr". May have side effects
@@ -288,60 +187,26 @@ protected:
// starting at "*threshold_", and for any other indices crossed by the
// block. Updates "*threshold_" and "*index_" to correspond to the first
// index after the block end.
void alloc_block_work2(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end);

public:
// The space may not have it's bottom and top set yet, which is why the
// region is passed as a parameter. The elements of the array are
// initialized to zero.
G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);

// Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for
// the containing concrete subtype of Space.
// This would be legal C++, but MS VC++ doesn't allow it.
void set_space(G1OffsetTableContigSpace* sp);

// Resets the covered region to one with the same _bottom as before but
// the "new_word_size".
void resize(size_t new_word_size);

virtual HeapWord* block_start_unsafe(const void* addr);
virtual HeapWord* block_start_unsafe_const(const void* addr) const;
void alloc_block_work(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end);

void check_all_cards(size_t left_card, size_t right_card) const;

public:
// The elements of the array are initialized to zero.
G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp);

void verify() const;

virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
friend class VMStructs;

// allocation boundary at which offset array must be updated
HeapWord* _next_offset_threshold;
size_t _next_offset_index;      // index corresponding to that boundary

// Work function to be called when allocation start crosses the next
// threshold in the contig space.
void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
blk_start, blk_end);
}

// Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block. (May have side effects,
// namely updating of shared array entries that "point" too far
// backwards. This can occur, for example, when lab allocation is used
// in a space covered by the table.)
inline HeapWord* block_start(const void* addr);
// Same as above, but does not have any of the possible side effects
// discussed above.
inline HeapWord* block_start_const(const void* addr) const;

// Initialize the threshold to reflect the first boundary after the
// bottom of the covered region.
@@ -362,19 +227,16 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
// never exceeds the "_next_offset_threshold".
void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
if (blk_end > _next_offset_threshold) {
alloc_block_work1(blk_start, blk_end);
alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
}
}
void alloc_block(HeapWord* blk, size_t size) {
alloc_block(blk, blk+size);
}

HeapWord* block_start_unsafe(const void* addr);
HeapWord* block_start_unsafe_const(const void* addr) const;

void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);

virtual void print_on(outputStream* out) PRODUCT_RETURN;
void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
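A schematic sketch (names shortened, details elided, not the real declarations) of the ownership structure the header above sets up after the refactoring: one heap-wide table that owns the byte array, and one lightweight per-space part that points back at it.

// Stripped-down picture of the new split.
class SharedTable {                 // role of G1BlockOffsetTable (CHeapObj, one per heap)
  unsigned char* _offset_array;     // one byte per card for the whole reserved heap
public:
  unsigned char entry(size_t card) const { return _offset_array[card]; }
};

class Space;                        // role of G1ContiguousSpace / HeapRegion

class TablePart {                   // role of G1BlockOffsetTablePart (value object, one per space)
  SharedTable* _bot;                // the global table
  Space* _space;                    // the space this part covers
public:
  TablePart(SharedTable* bot, Space* space) : _bot(bot), _space(space) {}
};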
@@ -30,34 +30,36 @@
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "gc/shared/space.hpp"

inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
if (addr >= _bottom && addr < _end) {
return block_start_unsafe(addr);
inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr) {
if (addr >= _space->bottom() && addr < _space->end()) {
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
return forward_to_block_containing_addr(q, addr);
} else {
return NULL;
}
}

inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
if (addr >= _bottom && addr < _end) {
return block_start_unsafe_const(addr);
inline HeapWord* G1BlockOffsetTablePart::block_start_const(const void* addr) const {
if (addr >= _space->bottom() && addr < _space->end()) {
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
} else {
return NULL;
}
}

u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
u_char G1BlockOffsetTable::offset_array(size_t index) const {
check_index(index, "index out of range");
return _offset_array[index];
}

void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
check_index(index, "index out of range");
set_offset_array_raw(index, offset);
}

void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
void G1BlockOffsetTable::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);
@@ -65,7 +67,7 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, He
set_offset_array(index, (u_char)offset);
}

void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
void G1BlockOffsetTable::set_offset_array(size_t left, size_t right, u_char offset) {
check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
@@ -73,11 +75,11 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
}

// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
inline size_t G1BlockOffsetTable::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
}

inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
inline size_t G1BlockOffsetTable::index_for(const void* p) const {
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
@@ -88,8 +90,7 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
return result;
}

inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
inline HeapWord* G1BlockOffsetTable::address_for_index(size_t index) const {
check_index(index, "index out of range");
HeapWord* result = address_for_index_raw(index);
assert(result >= _reserved.start() && result < _reserved.end(),
@@ -99,47 +100,45 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
return result;
}

inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
return gsp()->block_size(p);
inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p) const {
return _space->block_size(p);
}

inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
bool has_max_index,
size_t max_index) const {
assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
size_t index = _array->index_for(addr);
inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr,
bool has_max_index,
size_t max_index) const {
assert(_bot->offset_array(0) == 0, "objects can't cross covered areas");
size_t index = _bot->index_for(addr);
// We must make sure that the offset table entry we use is valid. If
// "addr" is past the end, start at the last known one and go forward.
if (has_max_index) {
index = MIN2(index, max_index);
}
HeapWord* q = _array->address_for_index(index);
HeapWord* q = _bot->address_for_index(index);

uint offset = _array->offset_array(index);  // Extend u_char to uint.
uint offset = _bot->offset_array(index);  // Extend u_char to uint.
while (offset >= N_words) {
// The excess of the offset from N_words indicates a power of Base
// to go back by.
size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
q -= (N_words * n_cards_back);
index -= n_cards_back;
offset = _array->offset_array(index);
offset = _bot->offset_array(index);
}
assert(offset < N_words, "offset too large");
q -= offset;
return q;
}

inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const {
if (addr >= gsp()->top()) return gsp()->top();
inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const {
if (addr >= _space->top()) return _space->top();
while (n <= addr) {
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
if (obj->klass_or_null() == NULL) {
return q;
}
n += block_size(q);
}
assert(q <= n, "wrong order for q and addr");
@@ -147,10 +146,11 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
return q;
}

inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
const void* addr) {
if (oop(q)->klass_or_null() == NULL) return q;
inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q,
const void* addr) {
if (oop(q)->klass_or_null() == NULL) {
return q;
}
HeapWord* n = q + block_size(q);
// In the normal case, where the query "addr" is a card boundary, and the
// offset table chunks are the same size as cards, the block starting at
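A minimal sketch of the lookup walk implemented by block_at_or_preceding and forward_to_block_containing_addr above, reusing the ToyBOT layout from the earlier sketch. Entries equal to N_WORDS stand in for the real logarithmic back-skip codes, the table is assumed well formed, and block sizes come from a caller-supplied function; none of these names are from the patch.

#include <cstdint>
#include <vector>

// Same toy layout as before: one byte per 64-word card.
static const size_t LOG_N_WORDS = 6;
static const size_t N_WORDS = 1 << LOG_N_WORDS;

// Walk back from addr's card until an entry below N_WORDS gives the exact
// distance to a known block start, then walk forward over blocks until the
// one containing addr is found. block_size(q) returns the size in words of
// the block starting at word q.
size_t block_start(const std::vector<uint8_t>& entry, size_t addr,
                   size_t (*block_size)(size_t)) {
  size_t index = addr >> LOG_N_WORDS;
  size_t q = index << LOG_N_WORDS;            // card boundary at or below addr
  unsigned offset = entry[index];
  while (offset >= N_WORDS) {                 // real code decodes a logarithmic back-skip here
    q -= N_WORDS;
    index -= 1;
    offset = entry[index];
  }
  q -= offset;                                // q is now a block start at or before addr
  size_t n = q + block_size(q);
  while (n <= addr) {                         // forward walk over the following blocks
    q = n;
    n += block_size(q);
  }
  return q;
}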
@ -1745,7 +1745,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_is_alive_closure_stw(this),
|
||||
_ref_processor_cm(NULL),
|
||||
_ref_processor_stw(NULL),
|
||||
_bot_shared(NULL),
|
||||
_bot(NULL),
|
||||
_cg1r(NULL),
|
||||
_g1mm(NULL),
|
||||
_refine_cte_cl(NULL),
|
||||
@ -1906,8 +1906,8 @@ jint G1CollectedHeap::initialize() {
|
||||
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
|
||||
G1RegionToSpaceMapper* bot_storage =
|
||||
create_aux_memory_mapper("Block offset table",
|
||||
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
|
||||
G1BlockOffsetSharedArray::heap_map_factor());
|
||||
G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
|
||||
G1BlockOffsetTable::heap_map_factor());
|
||||
|
||||
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
|
||||
G1RegionToSpaceMapper* cardtable_storage =
|
||||
@ -1945,7 +1945,7 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
|
||||
|
||||
_bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
|
||||
_bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
|
||||
|
||||
{
|
||||
HeapWord* start = _hrm.reserved().start();
|
||||
|
@ -154,7 +154,7 @@ private:
|
||||
uint _expansion_regions;
|
||||
|
||||
// The block offset table for the G1 heap.
|
||||
G1BlockOffsetSharedArray* _bot_shared;
|
||||
G1BlockOffsetTable* _bot;
|
||||
|
||||
// Tears down the region sets / lists so that they are empty and the
|
||||
// regions on the heap do not belong to a region set / list. The
|
||||
@ -1008,7 +1008,7 @@ public:
|
||||
void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
|
||||
|
||||
// The shared block offset table array.
|
||||
G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
|
||||
G1BlockOffsetTable* bot() const { return _bot; }
|
||||
|
||||
// Reference Processing accessors
|
||||
|
||||
|
@ -36,5 +36,5 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
||||
|
||||
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
|
||||
MemRegion mr) {
|
||||
return new HeapRegion(hrs_index, bot_shared(), mr);
|
||||
return new HeapRegion(hrs_index, bot(), mr);
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ ScanRSClosure::ScanRSClosure(G1ParPushHeapRSClosure* oc,
|
||||
_worker_i(worker_i),
|
||||
_try_claimed(false) {
|
||||
_g1h = G1CollectedHeap::heap();
|
||||
_bot_shared = _g1h->bot_shared();
|
||||
_bot = _g1h->bot();
|
||||
_ct_bs = _g1h->g1_barrier_set();
|
||||
_block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
|
||||
}
|
||||
@ -109,7 +109,7 @@ void ScanRSClosure::scanCard(size_t index, HeapRegion *r) {
|
||||
|
||||
// Set the "from" region in the closure.
|
||||
_oc->set_region(r);
|
||||
MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
|
||||
MemRegion card_region(_bot->address_for_index(index), G1BlockOffsetTable::N_words);
|
||||
MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
|
||||
MemRegion mr = pre_gc_allocated.intersection(card_region);
|
||||
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
|
||||
@ -153,7 +153,7 @@ bool ScanRSClosure::doHeapRegion(HeapRegion* r) {
|
||||
jump_to_card = hrrs->iter_claimed_next(_block_size);
|
||||
}
|
||||
if (current_card < jump_to_card) continue;
|
||||
HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
|
||||
HeapWord* card_start = _g1h->bot()->address_for_index(card_index);
|
||||
|
||||
HeapRegion* card_region = _g1h->heap_region_containing(card_start);
|
||||
_cards++;
|
||||
|
@ -36,7 +36,7 @@
|
||||
|
||||
class BitMap;
|
||||
class CardTableModRefBS;
|
||||
class G1BlockOffsetSharedArray;
|
||||
class G1BlockOffsetTable;
|
||||
class ConcurrentG1Refine;
|
||||
class CodeBlobClosure;
|
||||
class G1CollectedHeap;
|
||||
@ -171,7 +171,7 @@ class ScanRSClosure : public HeapRegionClosure {
|
||||
G1ParPushHeapRSClosure* _oc;
|
||||
CodeBlobClosure* _code_root_cl;
|
||||
|
||||
G1BlockOffsetSharedArray* _bot_shared;
|
||||
G1BlockOffsetTable* _bot;
|
||||
G1SATBCardTableModRefBS *_ct_bs;
|
||||
|
||||
double _strong_code_root_scan_time_sec;
|
||||
|
@ -153,7 +153,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
|
||||
}
|
||||
|
||||
void HeapRegion::reset_after_compaction() {
|
||||
G1OffsetTableContigSpace::reset_after_compaction();
|
||||
G1ContiguousSpace::reset_after_compaction();
|
||||
// After a compaction the mark bitmap is invalid, so we must
|
||||
// treat all objects as being inside the unmarked area.
|
||||
zero_marked_bytes();
|
||||
@ -183,7 +183,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
|
||||
}
|
||||
zero_marked_bytes();
|
||||
|
||||
_offsets.resize(HeapRegion::GrainWords);
|
||||
init_top_at_mark_start();
|
||||
if (clear_space) clear(SpaceDecorator::Mangle);
|
||||
}
|
||||
@ -219,7 +218,7 @@ void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
|
||||
_type.set_starts_humongous();
|
||||
_humongous_start_region = this;
|
||||
|
||||
_offsets.set_for_starts_humongous(obj_top, fill_size);
|
||||
_bot_part.set_for_starts_humongous(obj_top, fill_size);
|
||||
}
|
||||
|
||||
void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
|
||||
@ -239,9 +238,9 @@ void HeapRegion::clear_humongous() {
|
||||
}
|
||||
|
||||
HeapRegion::HeapRegion(uint hrm_index,
|
||||
G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
G1BlockOffsetTable* bot,
|
||||
MemRegion mr) :
|
||||
G1OffsetTableContigSpace(sharedOffsetArray, mr),
|
||||
G1ContiguousSpace(bot),
|
||||
_hrm_index(hrm_index),
|
||||
_allocation_context(AllocationContext::system()),
|
||||
_humongous_start_region(NULL),
|
||||
@ -257,7 +256,7 @@ HeapRegion::HeapRegion(uint hrm_index,
|
||||
_rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
|
||||
_predicted_bytes_to_copy(0)
|
||||
{
|
||||
_rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
|
||||
_rem_set = new HeapRegionRemSet(bot, this);
|
||||
|
||||
initialize(mr);
|
||||
}
|
||||
@ -265,7 +264,7 @@ HeapRegion::HeapRegion(uint hrm_index,
|
||||
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
assert(_rem_set->is_empty(), "Remembered set must be empty");

G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
@ -773,7 +772,7 @@ void HeapRegion::verify(VerifyOption vo,
}

if (!is_young() && !is_empty()) {
_offsets.verify();
_bot_part.verify();
}

if (is_region_humongous) {
@ -797,7 +796,7 @@ void HeapRegion::verify(VerifyOption vo,
if (p < the_end) {
// Look up top
HeapWord* addr_1 = p;
HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
if (b_start_1 != p) {
log_info(gc, verify)("BOT look up for top: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@ -809,7 +808,7 @@ void HeapRegion::verify(VerifyOption vo,
// Look up top + 1
HeapWord* addr_2 = p + 1;
if (addr_2 < the_end) {
HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
if (b_start_2 != p) {
log_info(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@ -823,7 +822,7 @@ void HeapRegion::verify(VerifyOption vo,
size_t diff = pointer_delta(the_end, p) / 2;
HeapWord* addr_3 = p + diff;
if (addr_3 < the_end) {
HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
if (b_start_3 != p) {
log_info(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@ -835,7 +834,7 @@ void HeapRegion::verify(VerifyOption vo,

// Look up end - 1
HeapWord* addr_4 = the_end - 1;
HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
if (b_start_4 != p) {
log_info(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@ -860,52 +859,41 @@ void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
void G1ContiguousSpace::clear(bool mangle_space) {
set_top(bottom());
_scan_top = bottom();
CompactibleSpace::clear(mangle_space);
reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
Space::set_bottom(new_bottom);
_offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
assert(new_end == _bottom + HeapRegion::GrainWords, "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
Space::set_end(new_end);
_offsets.resize(new_end - bottom());
}

#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
void G1ContiguousSpace::mangle_unused_area() {
mangle_unused_area_complete();
}

void G1OffsetTableContigSpace::mangle_unused_area_complete() {
void G1ContiguousSpace::mangle_unused_area_complete() {
SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1OffsetTableContigSpace::print() const {
void G1ContiguousSpace::print() const {
print_short();
tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
INTPTR_FORMAT ", " INTPTR_FORMAT ")",
p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
return _offsets.initialize_threshold();
HeapWord* G1ContiguousSpace::initialize_threshold() {
return _bot_part.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
HeapWord* end) {
_offsets.alloc_block(start, end);
return _offsets.threshold();
_bot_part.alloc_block(start, end);
return _bot_part.threshold();
}
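The pair of functions above is the write half of the BOT interface: allocation ignores the table until top() passes the current threshold, and crossing it records the new block and yields the next threshold to watch for. A rough caller sketch, assuming the usual threshold contract for contiguous spaces; the variables below are hypothetical and not part of this change:

// Sketch only: lazily refine the BOT as allocation crosses card thresholds.
HeapWord* threshold = space->initialize_threshold();
HeapWord* old_top = space->top();
HeapWord* new_top = old_top + word_size;
if (new_top > threshold) {
  // cross_threshold() records the block via _bot_part.alloc_block(old_top, new_top)
  // and returns the next threshold to test against.
  threshold = space->cross_threshold(old_top, new_top);
}
space->set_top(new_top);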
HeapWord* G1OffsetTableContigSpace::scan_top() const {
HeapWord* G1ContiguousSpace::scan_top() const {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* local_top = top();
OrderAccess::loadload();
@ -918,7 +906,7 @@ HeapWord* G1OffsetTableContigSpace::scan_top() const {
}
}

void G1OffsetTableContigSpace::record_timestamp() {
void G1ContiguousSpace::record_timestamp() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

@ -935,17 +923,17 @@ void G1OffsetTableContigSpace::record_timestamp() {
}
}

void G1OffsetTableContigSpace::record_retained_region() {
void G1ContiguousSpace::record_retained_region() {
// scan_top is the maximum address where it's safe for the next gc to
// scan this region.
_scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
HeapWord* p = bottom();
while (p < top()) {
if (block_is_obj(p)) {
@ -955,17 +943,14 @@ void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
}
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
_offsets(sharedOffsetArray, mr),
G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
_bot_part(bot, this),
_par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
_gc_time_stamp(0)
{
_offsets.set_space(this);
}
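With the constructor above, the BOT part is bound to its space in the initializer list, so the old two-step construction of _offsets followed by a separate set_space(this) call disappears. A hedged construction sketch using only names from this change; heap_mr, bot_storage and region_mr are placeholders:

// Sketch only: one shared G1BlockOffsetTable for the whole heap, one
// G1BlockOffsetTablePart per space, wired together at construction time.
G1BlockOffsetTable bot(heap_mr, bot_storage);       // shared table over the heap
HeapRegion hr(0 /* hrm_index */, &bot, region_mr);  // the region's _bot_part binds to &bot and the region itself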
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
CompactibleSpace::initialize(mr, clear_space, mangle_space);
_top = bottom();
_scan_top = bottom();

@ -115,12 +115,12 @@ public:
// The time stamps are re-initialized to zero at cleanup and at Full GCs.
// The current scheme that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
class G1OffsetTableContigSpace: public CompactibleSpace {
class G1ContiguousSpace: public CompactibleSpace {
friend class VMStructs;
HeapWord* volatile _top;
HeapWord* volatile _scan_top;
protected:
G1BlockOffsetArrayContigSpace _offsets;
G1BlockOffsetTablePart _bot_part;
Mutex _par_alloc_lock;
volatile unsigned _gc_time_stamp;
// When we need to retire an allocation region, while other threads
@ -132,14 +132,13 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
HeapWord* _pre_dummy_top;

public:
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);
G1ContiguousSpace(G1BlockOffsetTable* bot);

void set_top(HeapWord* value) { _top = value; }
HeapWord* top() const { return _top; }

protected:
// Reset the G1OffsetTableContigSpace.
// Reset the G1ContiguousSpace.
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

HeapWord* volatile* top_addr() { return &_top; }
@ -167,9 +166,6 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
void object_iterate(ObjectClosure* blk);
void safe_object_iterate(ObjectClosure* blk);

void set_bottom(HeapWord* value);
void set_end(HeapWord* value);

void mangle_unused_area() PRODUCT_RETURN;
void mangle_unused_area_complete() PRODUCT_RETURN;

@ -213,15 +209,15 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
virtual void print() const;

void reset_bot() {
_offsets.reset_bot();
_bot_part.reset_bot();
}

void print_bot_on(outputStream* out) {
_offsets.print_on(out);
_bot_part.print_on(out);
}
};

class HeapRegion: public G1OffsetTableContigSpace {
class HeapRegion: public G1ContiguousSpace {
friend class VMStructs;
// Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
template <typename SpaceType>
@ -233,8 +229,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// issues.)
HeapRegionRemSet* _rem_set;

G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

// Auxiliary functions for scan_and_forward support.
// See comments for CompactibleSpace for more information.
inline HeapWord* scan_limit() const {
@ -330,7 +324,7 @@ class HeapRegion: public G1OffsetTableContigSpace {

public:
HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
G1BlockOffsetTable* bot,
MemRegion mr);

// Initializing the HeapRegion not only resets the data structure, but also

@ -32,9 +32,9 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
HeapWord* obj = top();
size_t available = pointer_delta(end(), obj);
size_t want_to_allocate = MIN2(available, desired_word_size);
@ -49,9 +49,9 @@ inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
}
}

inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
do {
HeapWord* obj = top();
size_t available = pointer_delta(end(), obj);
@ -73,22 +73,22 @@ inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_siz
} while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
if (res != NULL) {
_offsets.alloc_block(res, *actual_size);
_bot_part.alloc_block(res, *actual_size);
}
return res;
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t word_size) {
inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
size_t temp;
return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) {
inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
size_t temp;
return par_allocate(word_size, word_size, &temp);
}
@ -96,20 +96,20 @@ inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) {
// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_size) {
MutexLocker x(&_par_alloc_lock);
return allocate(min_word_size, desired_word_size, actual_size);
}
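Because the BOT has to be updated in lock step with every allocation on this path, the sized par_allocate() above serializes on _par_alloc_lock instead of going through the CAS-based par_allocate_impl(). A hedged caller sketch; the caller and the size variables are hypothetical, not part of this change:

// Sketch only: request a LAB-sized block but accept anything down to the
// minimum; the size actually granted comes back through actual_size.
size_t actual_size = 0;
HeapWord* buf = space->par_allocate(min_word_size, desired_word_size, &actual_size);
if (buf != NULL) {
  // [buf, buf + actual_size) now belongs to this thread; the BOT already
  // covers the block because allocate() called _bot_part.alloc_block().
}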
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
return _offsets.block_start(p);
inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
return _bot_part.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
return _offsets.block_start_const(p);
G1ContiguousSpace::block_start_const(const void* p) const {
return _bot_part.block_start_const(p);
}
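The two accessors above are the read half of the BOT interface: given any address inside the region they return the start of the block covering it. A rough sketch of how a card-driven scan combines them with the shared table's address_for_index(), which appears later in this change; the variables are hypothetical:

// Sketch only: map a remembered-set card index back to an object start.
HeapWord* card_start = bot->address_for_index(card_index);  // shared G1BlockOffsetTable
HeapWord* obj_start  = region->block_start(card_start);     // delegates to the region's _bot_part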
inline bool

@ -687,9 +687,9 @@ OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
_sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
HeapRegion* hr)
: _bosa(bosa),
: _bot(bot),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
@ -728,8 +728,7 @@ void HeapRegionRemSet::print() {
HeapRegionRemSetIterator iter(this);
size_t card_index;
while (iter.has_next(card_index)) {
HeapWord* card_start =
G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
HeapWord* card_start = _bot->address_for_index(card_index);
tty->print_cr(" Card " PTR_FORMAT, p2i(card_start));
}
if (iter.n_yielded() != occupied()) {
@ -825,7 +824,7 @@ HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
_hrrs(hrrs),
_g1h(G1CollectedHeap::heap()),
_coarse_map(&hrrs->_other_regions._coarse_map),
_bosa(hrrs->_bosa),
_bot(hrrs->_bot),
_is(Sparse),
// Set these values so that we increment to the first region.
_coarse_cur_region_index(-1),
@ -852,7 +851,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
_coarse_cur_region_cur_card = 0;
HeapWord* r_bot =
_g1h->region_at((uint) _coarse_cur_region_index)->bottom();
_cur_region_card_offset = _bosa->index_for(r_bot);
_cur_region_card_offset = _bot->index_for(r_bot);
} else {
return false;
}
@ -893,7 +892,7 @@ void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
_fine_cur_prt = prt;

HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
_cur_region_card_offset = _bosa->index_for(r_bot);
_cur_region_card_offset = _bot->index_for(r_bot);

// The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
// To avoid special-casing this start case, and not miss the first bitmap
@ -1001,7 +1000,7 @@ void HeapRegionRemSet::test() {
size_t card_index;
while (iter.has_next(card_index)) {
HeapWord* card_start =
G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
G1CollectedHeap::heap()->bot()->address_for_index(card_index);
tty->print_cr(" Card " PTR_FORMAT ".", p2i(card_start));
sum++;
}

@ -34,7 +34,7 @@
// abstractly, in terms of what the "BlockOffsetTable" in use can parse.

class G1CollectedHeap;
class G1BlockOffsetSharedArray;
class G1BlockOffsetTable;
class HeapRegion;
class HeapRegionRemSetIterator;
class PerRegionTable;
@ -174,7 +174,7 @@ class HeapRegionRemSet : public CHeapObj<mtGC> {
friend class HeapRegionRemSetIterator;

private:
G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetTable* _bot;

// A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
@ -189,7 +189,7 @@ private:
volatile size_t _iter_claimed;

public:
HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr);
HeapRegionRemSet(G1BlockOffsetTable* bot, HeapRegion* hr);

static void setup_remset_size();

@ -350,7 +350,7 @@ class HeapRegionRemSetIterator : public StackObj {
// Local caching of HRRS fields.
const BitMap* _coarse_map;

G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetTable* _bot;
G1CollectedHeap* _g1h;

// The number of cards yielded since initialization.

@ -381,17 +381,17 @@ void FreeRegionList_test() {
MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
// Allocate a fake BOT because the HeapRegion constructor initializes
// the BOT.
size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size());
HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
bot_rs.size(),
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
G1BlockOffsetTable::N_bytes,
mtGC);
G1BlockOffsetSharedArray oa(heap, bot_storage);
G1BlockOffsetTable bot(heap, bot_storage);
bot_storage->commit_regions(0, num_regions_in_test);

// Set up memory regions for the heap regions.
@ -401,11 +401,11 @@ void FreeRegionList_test() {
MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
MemRegion mr4(mr3.end(), HeapRegion::GrainWords);

HeapRegion hr0(0, &oa, mr0);
HeapRegion hr1(1, &oa, mr1);
HeapRegion hr2(2, &oa, mr2);
HeapRegion hr3(3, &oa, mr3);
HeapRegion hr4(4, &oa, mr4);
HeapRegion hr0(0, &bot, mr0);
HeapRegion hr1(1, &bot, mr1);
HeapRegion hr2(2, &bot, mr2);
HeapRegion hr3(3, &bot, mr3);
HeapRegion hr4(4, &bot, mr4);
l.add_ordered(&hr1);
l.add_ordered(&hr0);
l.add_ordered(&hr3);

@ -35,7 +35,7 @@
static_field(HeapRegion, GrainBytes, size_t) \
static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord* volatile) \
nonstatic_field(G1ContiguousSpace, _top, HeapWord* volatile) \
\
nonstatic_field(G1HeapRegionTable, _base, address) \
nonstatic_field(G1HeapRegionTable, _length, size_t) \
@ -96,8 +96,8 @@
\
declare_type(G1CollectedHeap, CollectedHeap) \
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
declare_type(G1ContiguousSpace, CompactibleSpace) \
declare_type(HeapRegion, G1ContiguousSpace) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(G1MonitoringSupport) \

@ -248,7 +248,7 @@ public:
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
friend class VMStructs;
friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
friend class G1BlockOffsetTablePart; // temp. until we restructure and cleanup
protected:
// The following enums are used by do_block_internal() below
enum Action {