8321013: Parallel: Refactor ObjectStartArray
Reviewed-by: tschatzl, sjohanss
commit 50baaf46b7
parent afb896400b
@@ -25,32 +25,21 @@
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

uint ObjectStartArray::_card_shift = 0;
uint ObjectStartArray::_card_size = 0;
uint ObjectStartArray::_card_size_in_words = 0;
static size_t num_bytes_required(MemRegion mr) {
  assert(CardTable::is_card_aligned(mr.start()), "precondition");
  assert(CardTable::is_card_aligned(mr.end()), "precondition");

void ObjectStartArray::initialize_block_size(uint card_shift) {
  _card_shift = card_shift;
  _card_size = 1 << _card_shift;
  _card_size_in_words = _card_size / sizeof(HeapWord);
  return mr.word_size() / BOTConstants::card_size_in_words();
}

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We're based on the assumption that we use the same
  // size blocks as the card table.
  assert(_card_size == CardTable::card_size(), "Sanity");
  assert(_card_size <= MaxBlockSize, "block_size must be less than or equal to " UINT32_FORMAT, MaxBlockSize);

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

  size_t bytes_to_reserve = reserved_region.word_size() / _card_size_in_words;
  size_t bytes_to_reserve = num_bytes_required(reserved_region);
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
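For orientation, the new num_bytes_required() helper makes the reservation arithmetic explicit: the object start array keeps one byte per card, so the bytes to reserve are the covered word count divided by the words per card. A minimal standalone sketch of that arithmetic, assuming 8-byte HeapWords and 512-byte cards (common defaults, not values taken from this diff):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t heap_word_bytes    = 8;                                  // assumed HeapWord size
  const size_t card_size_bytes    = 512;                                // assumed card size
  const size_t card_size_in_words = card_size_bytes / heap_word_bytes;  // 64

  const size_t old_gen_bytes = 64 * 1024 * 1024;                        // hypothetical 64M old gen
  const size_t old_gen_words = old_gen_bytes / heap_word_bytes;

  // Mirrors num_bytes_required(): one uint8_t entry per card.
  const size_t bot_bytes = old_gen_words / card_size_in_words;          // 131072 bytes, i.e. 128K
  std::printf("object start array bytes for a 64M old gen: %zu\n", bot_bytes);
  return 0;
}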
@@ -62,91 +51,96 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }
  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
  MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);

  // We do not commit any memory initially
  _virtual_space.initialize(backing_store);

  _raw_base = (jbyte*)_virtual_space.low_boundary();
  assert(_raw_base != nullptr, "set from the backing_store");
  assert(_virtual_space.low_boundary() != nullptr, "set from the backing_store");

  _offset_base = _raw_base - (size_t(reserved_region.start()) >> _card_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
  _offset_base = (uint8_t*)(_virtual_space.low_boundary() - (uintptr_t(reserved_region.start()) >> BOTConstants::log_card_size()));
}

void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound) & (_card_size - 1)) == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (_card_size - 1)) == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / _card_size_in_words;
  DEBUG_ONLY(_covered_region = mr;)

  size_t requested_size = num_bytes_required(mr);
  // Only commit memory in page sized chunks
  requested_blocks_size_in_bytes =
    align_up(requested_blocks_size_in_bytes, os::vm_page_size());
  requested_size = align_up(requested_size, os::vm_page_size());

  _covered_region = mr;
  size_t current_size = _virtual_space.committed_size();

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
  if (requested_size == current_size) {
    return;
  }

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
  if (requested_size > current_size) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    size_t expand_by = requested_size - current_size;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
    }
    // Clear *only* the newly allocated region
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
  } else {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    size_t shrink_by = current_size - requested_size;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
}

void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
static void fill_range(uint8_t* start, uint8_t* end, uint8_t v) {
  // + 1 for inclusive
  memset(start, v, pointer_delta(end, start, sizeof(uint8_t)) + 1);
}

bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr,
         "Range is wrong. start_addr (" PTR_FORMAT ") is after end_addr (" PTR_FORMAT ")",
         p2i(start_addr), p2i(end_addr));
void ObjectStartArray::update_for_block_work(HeapWord* blk_start,
                                             HeapWord* blk_end) {
  HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
  uint8_t* const offset_entry = entry_for_addr(cur_card_boundary);

  assert(is_aligned(start_addr, _card_size), "precondition");
  // The first card holds the actual offset.
  *offset_entry = checked_cast<uint8_t>(pointer_delta(cur_card_boundary, blk_start));

  if (start_addr == end_addr) {
    // No objects in empty range.
    return false;
  // Check if this block spans over other cards.
  uint8_t* const end_entry = entry_for_addr(blk_end - 1);
  assert(offset_entry <= end_entry, "inv");

  if (offset_entry != end_entry) {
    // Handling remaining entries.
    uint8_t* start_entry_for_region = offset_entry + 1;
    for (uint i = 0; i < BOTConstants::N_powers; i++) {
      // -1 so that the reach ends in this region and not at the start
      // of the next.
      uint8_t* reach = offset_entry + BOTConstants::power_to_cards_back(i + 1) - 1;
      uint8_t value = checked_cast<uint8_t>(BOTConstants::card_size_in_words() + i);

      fill_range(start_entry_for_region, MIN2(reach, end_entry), value);
      start_entry_for_region = reach + 1;

      if (reach >= end_entry) {
        break;
      }
    }
    assert(start_entry_for_region > end_entry, "Sanity check");
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr - 1);
  debug_only(verify_for_block(blk_start, blk_end);)
}

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
void ObjectStartArray::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {
  assert(is_crossing_card_boundary(blk_start, blk_end), "precondition");

  const uint8_t* const start_entry = entry_for_addr(align_up_by_card_size(blk_start));
  const uint8_t* const end_entry = entry_for_addr(blk_end - 1);
  // Check entries in [start_entry, end_entry]
  assert(*start_entry < BOTConstants::card_size_in_words(), "offset entry");

  for (const uint8_t* i = start_entry + 1; i <= end_entry; ++i) {
    const uint8_t prev = *(i-1);
    const uint8_t value = *i;
    if (prev != value) {
      assert(value >= prev, "monotonic");
      size_t n_cards_back = BOTConstants::entry_to_cards_back(value);
      assert(start_entry == (i - n_cards_back), "inv");
    }
  }

  return false;
}
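To make the encoding written by update_for_block_work() concrete: the entry for the first card a block crosses holds the real distance, in words, back to the block start, while every later card holds an escape value of card_size_in_words + i meaning "skip back power_to_cards_back(i) cards". The sketch below encodes and decodes such a run under assumed constants (64 words per card and Base-16 backskips, i.e. entry 64 + i means skip 16^i cards); the real values come from BOTConstants and are not taken from this diff:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint8_t kCardSizeInWords = 64;   // assumed
static const unsigned kLogBase = 4;           // assumed: entry 64 + i means skip back 16^i cards

static size_t power_to_cards_back(unsigned i) { return size_t(1) << (kLogBase * i); }

// Encode: entry 0 already holds the true word offset; fill the rest with backskip
// values, mirroring the fill_range() loop above.
static void fill_backskips(std::vector<uint8_t>& entries) {
  size_t start = 1;
  for (unsigned i = 0; start < entries.size(); i++) {
    size_t reach = power_to_cards_back(i + 1) - 1;    // last index covered by value 64 + i
    for (size_t j = start; j <= reach && j < entries.size(); j++) {
      entries[j] = uint8_t(kCardSizeInWords + i);
    }
    start = reach + 1;
  }
}

// Decode: walk back until a true offset entry (< 64) is found, mirroring
// block_start_reaching_into_card() in the inline header.
static size_t find_offset_entry(const std::vector<uint8_t>& entries, size_t idx) {
  while (entries[idx] >= kCardSizeInWords) {
    idx -= power_to_cards_back(entries[idx] - kCardSizeInWords);
  }
  return idx;
}

int main() {
  std::vector<uint8_t> entries(1000, 0);  // one hypothetical block spanning 1000 cards
  entries[0] = 17;                        // block starts 17 words before card 0's boundary
  fill_backskips(entries);
  std::printf("card 999 walks back to entry %zu (word offset %u)\n",
              find_offset_entry(entries, 999), (unsigned)entries[0]);
  return 0;
}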
@@ -26,6 +26,7 @@
#define SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP

#include "gc/parallel/psVirtualspace.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.hpp"
@@ -36,141 +37,65 @@
//

class ObjectStartArray : public CHeapObj<mtGC> {
 friend class VerifyObjectStartArrayClosure;

 private:
  PSVirtualSpace _virtual_space;
  MemRegion _reserved_region;
  // The committed (old-gen heap) virtual space this object-start-array covers.
  MemRegion _covered_region;
  MemRegion _blocks_region;
  jbyte* _raw_base;
  jbyte* _offset_base;
  DEBUG_ONLY(MemRegion _covered_region;)

  static uint _card_shift;
  static uint _card_size;
  static uint _card_size_in_words;
  // BOT array
  PSVirtualSpace _virtual_space;

 public:

  enum BlockValueConstants {
    clean_block = -1
  };

  // Maximum size an offset table entry can cover. This maximum is derived from that
  // we need an extra bit for possible offsets in the byte for backskip values, leaving 2^7 possible offsets.
  // Minimum object alignment is 8 bytes (2^3), so we can at most represent 2^10 offsets within a BOT value.
  static const uint MaxBlockSize = 1024;

  // Initialize block size based on card size
  static void initialize_block_size(uint card_shift);

  static uint card_shift() {
    return _card_shift;
  }

  static uint card_size() {
    return _card_size;
  }
  static uint card_size_in_words() {
    return _card_size_in_words;
  }

 protected:
  // Biased array-start of BOT array for fast heap-addr / BOT entry translation
  uint8_t* _offset_base;

  // Mapping from address to object start array entry
  jbyte* block_for_addr(void* p) const {
  uint8_t* entry_for_addr(const void* const p) const {
    assert(_covered_region.contains(p),
           "out of bounds access to object start array");
    jbyte* result = &_offset_base[uintptr_t(p) >> _card_shift];
    assert(_blocks_region.contains(result),
           "out of bounds result in byte_for");
    uint8_t* result = &_offset_base[uintptr_t(p) >> BOTConstants::log_card_size()];
    return result;
  }

  // Mapping from object start array entry to address of first word
  HeapWord* addr_for_block(jbyte* p) {
    assert(_blocks_region.contains(p),
           "out of bounds access to object start array");
    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << _card_shift);
  HeapWord* addr_for_entry(const uint8_t* const p) const {
    size_t delta = pointer_delta(p, _offset_base, sizeof(uint8_t));
    HeapWord* result = (HeapWord*) (delta << BOTConstants::log_card_size());
    assert(_covered_region.contains(result),
           "out of bounds accessor from card marking array");
    return result;
  }

  // Mapping that includes the derived offset.
  // If the block is clean, returns the last address in the covered region.
  // If the block is < index 0, returns the start of the covered region.
  HeapWord* offset_addr_for_block(jbyte* p) const {
    // We have to do this before the assert
    if (p < _raw_base) {
      return _covered_region.start();
    }

    assert(_blocks_region.contains(p),
           "out of bounds access to object start array");

    if (*p == clean_block) {
      return _covered_region.end();
    }

    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << _card_shift);
    result += *p;

    assert(_covered_region.contains(result),
           "out of bounds accessor from card marking array");

    return result;
  static HeapWord* align_up_by_card_size(HeapWord* const addr) {
    return align_up(addr, BOTConstants::card_size());
  }

  void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);

  void verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const;

 public:

  // This method is in lieu of a constructor, so that this class can be
  // embedded inline in other classes.
  void initialize(MemRegion reserved_region);

  // Heap old-gen resizing
  void set_covered_region(MemRegion mr);

  void reset();

  MemRegion covered_region() { return _covered_region; }

#define assert_covered_region_contains(addr) \
        assert(_covered_region.contains(addr), \
               #addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]", \
               p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end()))

  void allocate_block(HeapWord* p) {
    assert_covered_region_contains(p);
    jbyte* block = block_for_addr(p);
    HeapWord* block_base = addr_for_block(block);
    size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
    assert(offset < 128, "Sanity");
    // When doing MT offsets, we can't assert this.
    //assert(offset > *block, "Found backwards allocation");
    *block = (jbyte)offset;
  static bool is_crossing_card_boundary(HeapWord* const blk_start,
                                        HeapWord* const blk_end) {
    HeapWord* cur_card_boundary = align_up_by_card_size(blk_start);
    // Strictly greater-than, since we check if this block *crosses* card boundary.
    return blk_end > cur_card_boundary;
  }

  // Optimized for finding the first object that crosses into
  // a given block. The blocks contain the offset of the last
  // object in that block. Scroll backwards by one, and the first
  // object hit should be at the beginning of the block
  inline HeapWord* object_start(HeapWord* addr) const;
  // Returns the address of the start of the block reaching into the card containing
  // "addr".
  inline HeapWord* block_start_reaching_into_card(HeapWord* const addr) const;

  bool is_block_allocated(HeapWord* addr) {
    assert_covered_region_contains(addr);
    jbyte* block = block_for_addr(addr);
    return *block != clean_block;
  // [blk_start, blk_end) representing a block of memory in the heap.
  void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (is_crossing_card_boundary(blk_start, blk_end)) {
      update_for_block_work(blk_start, blk_end);
    }
  }

  // Return true iff an object starts in
  // [start_addr, end_addr_aligned_up)
  // where
  //   end_addr_aligned_up = align_up(end_addr, _card_size)
  // Precondition: start_addr is card-size aligned
  bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
  inline HeapWord* object_start(HeapWord* const addr) const;
};

#endif // SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP
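Two details of the new header deserve a note. MaxBlockSize = 1024 follows directly from the entry encoding: one bit of the byte distinguishes real offsets from backskip values, leaving 2^7 offsets, and with 8-byte minimum object alignment that spans 2^7 * 2^3 = 1024 bytes per card. Separately, _offset_base is biased so that a heap address maps to its BOT entry with one shift and one index operation. A small integer-only sketch of that bias with invented addresses (only the idea, not the real layout, comes from entry_for_addr()/addr_for_entry() above):

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned log_card_size = 9;                 // assumed 512-byte cards
  const uint64_t covered_start = 0x700000000000;    // hypothetical old-gen start (card aligned)
  const uint64_t bot_base      = 0x7f0000000000;    // hypothetical BOT storage base

  // The bias is chosen so that indexing with (addr >> log_card_size)
  // lands on entry 0 for covered_start:
  const uint64_t offset_base = bot_base - (covered_start >> log_card_size);

  const uint64_t addr  = covered_start + 3 * 512 + 40;           // somewhere inside card #3
  const uint64_t entry = offset_base + (addr >> log_card_size);  // what entry_for_addr() computes

  std::printf("entry index = %llu\n", (unsigned long long)(entry - bot_base));  // prints 3
  return 0;
}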
@@ -27,27 +27,41 @@

#include "gc/parallel/objectStartArray.hpp"

// Optimized for finding the first object that crosses into
// a given block. The blocks contain the offset of the last
// object in that block. Scroll backwards by one, and the first
// object hit should be at the beginning of the block
HeapWord* ObjectStartArray::object_start(HeapWord* addr) const {
  assert_covered_region_contains(addr);
  jbyte* block = block_for_addr(addr);
  HeapWord* scroll_forward = offset_addr_for_block(block--);
  while (scroll_forward > addr) {
    scroll_forward = offset_addr_for_block(block--);
  }
HeapWord* ObjectStartArray::object_start(HeapWord* const addr) const {
  HeapWord* cur_block = block_start_reaching_into_card(addr);

  HeapWord* next = scroll_forward;
  while (next <= addr) {
    scroll_forward = next;
    next += cast_to_oop(next)->size();
  while (true) {
    HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
    if (next_block > addr) {
      assert(cur_block <= addr, "postcondition");
      return cur_block;
    }
    // Because the BOT is precise, we should never step into the next card
    // (i.e. crossing the card boundary).
    assert(!is_crossing_card_boundary(next_block, addr), "must be");
    cur_block = next_block;
  }
  assert(scroll_forward <= addr, "wrong order for current and arg");
  assert(addr <= next, "wrong order for arg and next");
  return scroll_forward;
}

HeapWord* ObjectStartArray::block_start_reaching_into_card(HeapWord* const addr) const {
  const uint8_t* entry = entry_for_addr(addr);

  uint8_t offset;
  while (true) {
    offset = *entry;

    if (offset < BOTConstants::card_size_in_words()) {
      break;
    }

    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
    entry -= n_cards_back;
  }

  HeapWord* q = addr_for_entry(entry);
  return q - offset;
}

#endif // SHARE_GC_PARALLEL_OBJECTSTARTARRAY_INLINE_HPP
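The rewritten object_start(addr) has two phases: block_start_reaching_into_card() follows backskip entries to a true offset entry (the decoding shown in the earlier sketch), and the loop above then walks forward object by object until it finds the one covering addr. A toy version of that forward walk over an array of object sizes, standing in for cast_to_oop(...)->size() (the sizes and the starting block are invented):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<size_t> sizes = {10, 3, 50, 7};  // hypothetical object sizes, in words
  const size_t addr = 61;                      // word index we want the enclosing object for
  size_t cur_block = 0;                        // pretend the BOT walk-back returned word 0
  size_t idx = 0;

  while (true) {
    size_t next_block = cur_block + sizes[idx];  // stand-in for cast_to_oop(cur_block)->size()
    if (next_block > addr) {                     // addr lies inside [cur_block, next_block)
      std::printf("object containing word %zu starts at word %zu\n", addr, cur_block);
      return 0;
    }
    cur_block = next_block;
    ++idx;
  }
}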
@@ -92,10 +92,10 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  // Check the heap layout documented at `class ParallelScavengeHeap`.
  assert(reserved_mr.end() != heap->reserved_region().end(), "invariant");
  guarantee(ct->is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");

  //
  // ObjectSpace stuff
@@ -133,18 +133,14 @@ size_t PSOldGen::num_iterable_blocks() const {

void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
  size_t block_word_size = IterateBlockSize / HeapWordSize;
  assert((block_word_size % (ObjectStartArray::card_size())) == 0,
         "Block size not a multiple of start_array block");
  assert((block_word_size % BOTConstants::card_size_in_words()) == 0,
         "To ensure fast object_start calls");

  MutableSpace *space = object_space();

  HeapWord* begin = space->bottom() + block_index * block_word_size;
  HeapWord* end = MIN2(space->top(), begin + block_word_size);

  if (!start_array()->object_starts_in_range(begin, end)) {
    return;
  }

  // Get object starting at or reaching into this block.
  HeapWord* start = start_array()->object_start(begin);
  if (start < begin) {
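The reworded assert keeps the same requirement in BOT terms: each iteration block must be a whole number of cards so that object_start(begin) can consult the BOT without extra alignment work. A quick check of the arithmetic under assumed defaults (8-byte HeapWords, 512-byte cards; IterateBlockSize itself is the 1M constant shown in the PSOldGen header further below):

#include <cassert>
#include <cstddef>

int main() {
  const size_t HeapWordSize       = 8;                                 // assumed
  const size_t IterateBlockSize   = 1024 * 1024;                       // 1M, as in PSOldGen
  const size_t card_size_in_words = 512 / HeapWordSize;                // 64, assumed card size
  const size_t block_word_size    = IterateBlockSize / HeapWordSize;   // 131072 words
  assert(block_word_size % card_size_in_words == 0);                   // 131072 == 2048 * 64
  return 0;
}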
@@ -286,8 +282,8 @@ void PSOldGen::shrink(size_t bytes) {
void PSOldGen::complete_loaded_archive_space(MemRegion archive_space) {
  HeapWord* cur = archive_space.start();
  while (cur < archive_space.end()) {
    _start_array.allocate_block(cur);
    size_t word_size = cast_to_oop(cur)->size();
    _start_array.update_for_block(cur, cur + word_size);
    cur += word_size;
  }
}
@@ -390,14 +386,13 @@ void PSOldGen::verify() {
class VerifyObjectStartArrayClosure : public ObjectClosure {
  ObjectStartArray* _start_array;

  public:
 public:
  VerifyObjectStartArrayClosure(ObjectStartArray* start_array) :
    _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
  }
};

@@ -51,29 +51,11 @@ class PSOldGen : public CHeapObj<mtGC> {
  // Block size for parallel iteration
  static const size_t IterateBlockSize = 1024 * 1024;

#ifdef ASSERT
  void assert_block_in_covered_region(MemRegion new_memregion) {
    // Explicitly capture current covered_region in a local
    MemRegion covered_region = this->start_array()->covered_region();
    assert(covered_region.contains(new_memregion),
           "new region is not in covered_region [ " PTR_FORMAT ", " PTR_FORMAT " ], "
           "new region [ " PTR_FORMAT ", " PTR_FORMAT " ], "
           "object space [ " PTR_FORMAT ", " PTR_FORMAT " ]",
           p2i(covered_region.start()),
           p2i(covered_region.end()),
           p2i(new_memregion.start()),
           p2i(new_memregion.end()),
           p2i(this->object_space()->used_region().start()),
           p2i(this->object_space()->used_region().end()));
  }
#endif

  HeapWord* cas_allocate_noexpand(size_t word_size) {
    assert_locked_or_safepoint(Heap_lock);
    HeapWord* res = object_space()->cas_allocate(word_size);
    if (res != nullptr) {
      DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
      _start_array.allocate_block(res);
      _start_array.update_for_block(res, res + word_size);
    }
    return res;
  }
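The pattern in cas_allocate_noexpand() above repeats at every call site in this change: where allocate_block(p) used to record just the first word, the caller now passes the whole extent via update_for_block(start, start + word_size), and that call only does work when the block actually crosses a card boundary. A toy version of the gating test, using plain word indices instead of HeapWord* (64 words per card is an assumption, not a value from this diff):

#include <cstddef>
#include <cstdio>

static const size_t kCardSizeInWords = 64;  // assumed

static bool is_crossing_card_boundary(size_t blk_start, size_t blk_end) {
  // align_up_by_card_size(blk_start), done with integer arithmetic
  size_t cur_card_boundary = (blk_start + kCardSizeInWords - 1) / kCardSizeInWords * kCardSizeInWords;
  return blk_end > cur_card_boundary;  // strictly greater: the block must cross, not merely touch
}

int main() {
  std::printf("%d\n", is_crossing_card_boundary(10, 20));  // 0: stays inside card 0
  std::printf("%d\n", is_crossing_card_boundary(60, 64));  // 0: ends exactly on the boundary
  std::printf("%d\n", is_crossing_card_boundary(60, 70));  // 1: crosses into card 1
  return 0;
}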
@@ -1496,7 +1496,7 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
    _mark_bitmap.mark_obj(obj_beg, obj_len);
    _summary_data.add_obj(obj_beg, obj_len);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->allocate_block(obj_beg);
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

@@ -2462,7 +2462,6 @@ void PSParallelCompact::compact() {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  // for [0..last_space_id)
@@ -2534,7 +2533,7 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
#endif // #ifdef ASSERT

inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
  _start_array->allocate_block(addr);
  _start_array->update_for_block(addr, addr + cast_to_oop(addr)->size());
  compaction_manager()->update_contents(cast_to_oop(addr));
}

@@ -2627,7 +2626,7 @@ void PSParallelCompact::update_deferred_object(ParCompactionManager* cm, HeapWor
  const SpaceInfo* const space_info = _space_info + space_id(addr);
  ObjectStartArray* const start_array = space_info->start_array();
  if (start_array != nullptr) {
    start_array->allocate_block(addr);
    start_array->update_for_block(addr, addr + cast_to_oop(addr)->size());
  }

  cm->update_contents(cast_to_oop(addr));
@@ -3133,7 +3132,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->allocate_block(destination());
    _start_array->update_for_block(destination(), destination() + words);
  }

  if (copy_destination() != source()) {
@@ -3197,8 +3196,9 @@ FillClosure::do_addr(HeapWord* addr, size_t size) {
  CollectedHeap::fill_with_objects(addr, size);
  HeapWord* const end = addr + size;
  do {
    _start_array->allocate_block(addr);
    addr += cast_to_oop(addr)->size();
    size_t size = cast_to_oop(addr)->size();
    _start_array->update_for_block(addr, addr + size);
    addr += size;
  } while (addr < end);
  return ParMarkBitMap::incomplete;
}
@@ -115,7 +115,8 @@ void PSOldPromotionLAB::flush() {

  assert(_start_array != nullptr, "Sanity");

  _start_array->allocate_block(obj);
  // filler obj
  _start_array->update_for_block(obj, obj + cast_to_oop(obj)->size());
}

#ifdef ASSERT
@@ -132,17 +133,11 @@ bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
}

bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  assert(_start_array->covered_region().contains(lab), "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }

  return false;
  return used.contains(lab);
}

#endif /* ASSERT */
@@ -120,7 +120,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top),
           "checking alignment");
    _start_array->allocate_block(obj);
    _start_array->update_for_block(obj, obj + size);
    return obj;
  }

@@ -303,8 +303,8 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(_rs->is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.start()), "generation must be card aligned");
  guarantee(CardTable::is_card_aligned(reserved_mr.end()), "generation must be card aligned");
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
@@ -54,11 +54,6 @@ void CardTable::initialize_card_size() {
  // Set blockOffsetTable size based on card table entry size
  BOTConstants::initialize_bot_size(_card_shift);

#if INCLUDE_PARALLELGC
  // Set ObjectStartArray block size based on card table entry size
  ObjectStartArray::initialize_block_size(_card_shift);
#endif

  log_info_p(gc, init)("CardTable entry size: " UINT32_FORMAT, _card_size);
}

@@ -119,7 +119,7 @@ public:
  void clear_MemRegion(MemRegion mr);

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
  static bool is_card_aligned(HeapWord* p) {
    return is_aligned(p, card_size());
  }