8329203: Parallel: Investigate Mark-Compact for Full GC to decrease memory usage
Reviewed-by: rkennke, gli
parent 1e5a2780d9
commit 94af3c23ea
@@ -37,9 +37,6 @@ bool
ParMarkBitMap::initialize(MemRegion covered_region)
{
  const idx_t bits = bits_required(covered_region);
  // The bits will be divided evenly between two bitmaps; each of them should be
  // an integral number of words.
  assert(is_aligned(bits, (BitsPerWord * 2)), "region size unaligned");

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
@@ -61,8 +58,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
  _region_start = covered_region.start();
  _region_size = covered_region.word_size();
  BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
  _beg_bits = BitMapView(map, bits / 2);
  _end_bits = BitMapView(map + words / 2, bits / 2);
  _beg_bits = BitMapView(map, bits);
  return true;
}

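The initialize() change above is where the memory saving comes from: the reservation used to be carved into two half-sized BitMapViews (begin bits first, end bits at map + words / 2), and now a single begin-bits view covers the whole bitmap. A minimal standalone sketch of the two layouts (a model, not HotSpot code; BitView stands in for BitMapView):

    #include <cstddef>
    #include <cstdint>

    struct BitView { uint64_t* map; size_t bits; };  // stand-in for BitMapView

    // Old scheme: one reservation, split in half between begin and end bits.
    void carve_old(uint64_t* map, size_t bits, size_t words,
                   BitView& beg, BitView& end) {
      beg = { map,             bits / 2 };
      end = { map + words / 2, bits / 2 };
    }

    // New scheme: the whole (now half-sized) reservation holds begin bits only;
    // object ends are derived from the objects themselves, so no end bits exist.
    void carve_new(uint64_t* map, size_t bits, BitView& beg) {
      beg = { map, bits };
    }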
@@ -77,176 +73,6 @@ ParMarkBitMap::initialize(MemRegion covered_region)
  return false;
}

bool
ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
{
  const idx_t beg_bit = addr_to_bit(addr);
  if (_beg_bits.par_set_bit(beg_bit)) {
    const idx_t end_bit = addr_to_bit(addr + size - 1);
    bool end_bit_ok = _end_bits.par_set_bit(end_bit);
    assert(end_bit_ok, "concurrency problem");
    return true;
  }
  return false;
}

inline bool
ParMarkBitMap::is_live_words_in_range_in_cache(ParCompactionManager* cm, HeapWord* beg_addr) const {
  return cm->last_query_begin() == beg_addr;
}

inline void
ParMarkBitMap::update_live_words_in_range_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj, size_t result) const {
  cm->set_last_query_begin(beg_addr);
  cm->set_last_query_object(end_obj);
  cm->set_last_query_return(result);
}

size_t
ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
{
  assert(beg_addr <= cast_from_oop<HeapWord*>(end_obj), "bad range");
  assert(is_marked(end_obj), "end_obj must be live");

  idx_t live_bits = 0;

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t end_bit = addr_to_bit(cast_from_oop<HeapWord*>(end_obj));
  const idx_t range_end = align_range_end(end_bit);

  idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
  while (beg_bit < end_bit) {
    idx_t tmp_end = find_obj_end(beg_bit, range_end);
    assert(tmp_end < end_bit, "missing end bit");
    live_bits += tmp_end - beg_bit + 1;
    beg_bit = find_obj_beg(tmp_end + 1, range_end);
  }
  return bits_to_words(live_bits);
}

size_t
ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
{
  HeapWord* last_beg = cm->last_query_begin();
  HeapWord* last_obj = cast_from_oop<HeapWord*>(cm->last_query_object());
  HeapWord* end_obj = cast_from_oop<HeapWord*>(end_oop);

  size_t last_ret = cm->last_query_return();
  if (end_obj > last_obj) {
    last_ret = last_ret + live_words_in_range_helper(last_obj, end_oop);
    last_obj = end_obj;
  } else if (end_obj < last_obj) {
    // The cached value is for an object that is to the left (lower address) of the current
    // end_obj. Calculate back from that cached value.
    if (pointer_delta(end_obj, beg_addr) > pointer_delta(last_obj, end_obj)) {
      last_ret = last_ret - live_words_in_range_helper(end_obj, cast_to_oop(last_obj));
    } else {
      last_ret = live_words_in_range_helper(beg_addr, end_oop);
    }
    last_obj = end_obj;
  }

  update_live_words_in_range_cache(cm, last_beg, cast_to_oop(last_obj), last_ret);
  return last_ret;
}

size_t
ParMarkBitMap::live_words_in_range(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const
{
  // Try to reuse result from ParCompactionManager cache first.
  if (is_live_words_in_range_in_cache(cm, beg_addr)) {
    return live_words_in_range_use_cache(cm, beg_addr, end_obj);
  }
  size_t ret = live_words_in_range_helper(beg_addr, end_obj);
  update_live_words_in_range_cache(cm, beg_addr, end_obj, ret);
  return ret;
}

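live_words_in_range() above is a single-entry memo per worker: a repeat query with the same begin address is answered from, or incrementally updated against, the last (begin, object, result) triple instead of rescanning the bitmap from the begin address. A simplified standalone model of that cache (not HotSpot code; count() stands in for the bitmap scan in live_words_in_range_helper(), and the real code also subtracts a partial scan when shrinking is cheaper):

    #include <cstddef>

    struct QueryCache {
      const char* last_beg = nullptr;  // stands in for HeapWord*
      const char* last_end = nullptr;
      size_t      last_ret = 0;
    };

    template <typename Count>
    size_t live_words(QueryCache& c, const char* beg, const char* end, Count count) {
      if (c.last_beg == beg) {           // cache hit on the begin address
        if (end > c.last_end) {          // extend the cached result forward
          c.last_ret += count(c.last_end, end);
          c.last_end = end;
        } else if (end < c.last_end) {   // simplified: recompute on shrink
          c.last_ret = count(beg, end);
          c.last_end = end;
        }
        return c.last_ret;
      }
      c = { beg, end, count(beg, end) }; // miss: full scan, then cache it
      return c.last_ret;
    }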
ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       idx_t range_beg, idx_t range_end) const
{
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  assert(range_beg <= range_end, "live range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t search_end = align_range_end(range_end);

  idx_t cur_beg = range_beg;
  while (true) {
    cur_beg = find_obj_beg(cur_beg, search_end);
    if (cur_beg >= range_end) {
      break;
    }

    const size_t size = obj_size(cur_beg);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    cur_beg += words_to_bits(size);
    if (cur_beg >= range_end) {
      break;
    }
  }

  return complete;
}

ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       ParMarkBitMapClosure* dead_closure,
                       idx_t range_beg, idx_t range_end,
                       idx_t dead_range_end) const
{
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  DEBUG_ONLY(verify_bit(dead_range_end);)
  assert(range_beg <= range_end, "live range invalid");
  assert(range_end <= dead_range_end, "dead range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t dead_search_end = align_range_end(dead_range_end);

  idx_t cur_beg = range_beg;
  if (range_beg < range_end && is_unmarked(range_beg)) {
    // The range starts with dead space. Look for the next object, then fill.
    // This must be the beginning of old/eden/from/to-space, so it must be
    // large enough for a filler.
    cur_beg = find_obj_beg(range_beg + 1, dead_search_end);
    const idx_t dead_space_end = cur_beg - 1;
    const size_t size = obj_size(range_beg, dead_space_end);
    dead_closure->do_addr(bit_to_addr(range_beg), size);
  }

  while (cur_beg < range_end) {
    const size_t size = obj_size(cur_beg);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    const idx_t dead_space_beg = cur_beg + words_to_bits(size);
    if (dead_space_beg >= dead_search_end) {
      break;
    }
    // Look for the start of the next object.
    cur_beg = find_obj_beg(dead_space_beg, dead_search_end);
    if (cur_beg > dead_space_beg) {
      // Found dead space; compute the size and invoke the dead closure.
      const idx_t dead_space_end = cur_beg - 1;
      dead_closure->do_addr(bit_to_addr(dead_space_beg),
                            obj_size(dead_space_beg, dead_space_end));
    }
  }

  return complete;
}

#ifdef ASSERT
void ParMarkBitMap::verify_clear() const
{
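Both iterate() overloads above follow the same visitor protocol: scan the bitmap for alternating live runs and dead gaps, hand each to the corresponding closure, and stop early once the live closure reports it is full. A compact standalone sketch of that control flow (a model, not the HotSpot code; the find/live/dead callables are assumptions):

    #include <cstddef>

    enum class Status { incomplete, complete, full };

    template <typename FindBeg, typename FindEnd, typename Live, typename Dead>
    Status iterate(size_t beg, size_t end,
                   FindBeg find_obj_beg, FindEnd find_obj_end,
                   Live live, Dead dead) {
      size_t cur = find_obj_beg(beg, end);
      if (cur > beg) dead(beg, cur);              // leading dead space
      while (cur < end) {
        size_t obj_end = find_obj_end(cur, end);  // one past the live run
        if (live(cur, obj_end) == Status::full) {
          return Status::full;                    // closure filled up: stop early
        }
        size_t next = find_obj_beg(obj_end, end);
        if (next > obj_end) dead(obj_end, next);  // interior dead space
        cur = next;
      }
      return Status::complete;
    }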
@@ -39,18 +39,14 @@ public:
  typedef BitMap::idx_t idx_t;

  // Values returned by the iterate() methods.
  enum IterationStatus { incomplete, complete, full, would_overflow };
  enum IterationStatus { incomplete, complete, full };

  inline ParMarkBitMap();
  bool initialize(MemRegion covered_region);

  // Atomically mark an object as live.
  bool mark_obj(HeapWord* addr, size_t size);
  inline bool mark_obj(oop obj, size_t size);

  // Return whether the specified begin or end bit is set.
  inline bool is_obj_beg(idx_t bit) const;
  inline bool is_obj_end(idx_t bit) const;
  inline bool mark_obj(HeapWord* addr);
  inline bool mark_obj(oop obj);

  // Traditional interface for testing whether an object is marked or not (these
  // test only the begin bits).
@@ -68,61 +64,6 @@ public:
  inline static size_t bits_to_words(idx_t bits);
  inline static idx_t words_to_bits(size_t words);

  // Return the size in words of an object given a begin bit and an end bit, or
  // the equivalent beg_addr and end_addr.
  inline size_t obj_size(idx_t beg_bit, idx_t end_bit) const;
  inline size_t obj_size(HeapWord* beg_addr, HeapWord* end_addr) const;

  // Return the size in words of the object (a search is done for the end bit).
  inline size_t obj_size(idx_t beg_bit) const;
  inline size_t obj_size(HeapWord* addr) const;

  // Apply live_closure to each live object that lies completely within the
  // range [live_range_beg, live_range_end). This is used to iterate over the
  // compacted region of the heap. Return values:
  //
  // complete         The iteration is complete. All objects in the range
  //                  were processed and the closure is not full;
  //                  closure->source() is set one past the end of the range.
  //
  // full             The closure is full; closure->source() is set to one
  //                  past the end of the last object processed.
  //
  // would_overflow   The next object in the range would overflow the closure;
  //                  closure->source() is set to the start of that object.
  IterationStatus iterate(ParMarkBitMapClosure* live_closure,
                          idx_t range_beg, idx_t range_end) const;
  inline IterationStatus iterate(ParMarkBitMapClosure* live_closure,
                                 HeapWord* range_beg,
                                 HeapWord* range_end) const;

  // Apply live closure as above and additionally apply dead_closure to all dead
  // space in the range [range_beg, dead_range_end). Note that dead_range_end
  // must be >= range_end. This is used to iterate over the dense prefix.
  //
  // This method assumes that if the first bit in the range (range_beg) is not
  // marked, then dead space begins at that point and the dead_closure is
  // applied. Thus callers must ensure that range_beg is not in the middle of a
  // live object.
  IterationStatus iterate(ParMarkBitMapClosure* live_closure,
                          ParMarkBitMapClosure* dead_closure,
                          idx_t range_beg, idx_t range_end,
                          idx_t dead_range_end) const;
  inline IterationStatus iterate(ParMarkBitMapClosure* live_closure,
                                 ParMarkBitMapClosure* dead_closure,
                                 HeapWord* range_beg,
                                 HeapWord* range_end,
                                 HeapWord* dead_range_end) const;

  // Return the number of live words in the range [beg_addr, end_obj) due to
  // objects that start in the range. If a live object extends onto the range,
  // the caller must detect and account for any live words due to that object.
  // If a live object extends beyond the end of the range, only the words within
  // the range are included in the result. The end of the range must be a live object,
  // which is the case when updating pointers. This allows a branch to be removed
  // from inside the loop.
  size_t live_words_in_range(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const;

  inline HeapWord* region_start() const;
  inline HeapWord* region_end() const;
  inline size_t region_size() const;
@@ -141,11 +82,12 @@ public:
  // respectively) in the range [beg, end). If no object is found, return end.
  // end must be word-aligned.
  inline idx_t find_obj_beg(idx_t beg, idx_t end) const;
  inline idx_t find_obj_end(idx_t beg, idx_t end) const;

  inline HeapWord* find_obj_beg(HeapWord* beg, HeapWord* end) const;
  inline HeapWord* find_obj_end(HeapWord* beg, HeapWord* end) const;

  // Return the address of the last obj-start in the range [beg, end). If no
  // object is found, return end.
  inline HeapWord* find_obj_beg_reverse(HeapWord* beg, HeapWord* end) const;
  // Clear a range of bits or the entire bitmap (both begin and end bits are
  // cleared).
  inline void clear_range(idx_t beg, idx_t end);
@@ -158,7 +100,6 @@ public:
  void print_on_error(outputStream* st) const {
    st->print_cr("Marking Bits: (ParMarkBitMap*) " PTR_FORMAT, p2i(this));
    _beg_bits.print_on_error(st, " Begin Bits: ");
    _end_bits.print_on_error(st, " End Bits: ");
  }

#ifdef ASSERT
@@ -168,11 +109,6 @@ public:
#endif // #ifdef ASSERT

private:
  size_t live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const;

  bool is_live_words_in_range_in_cache(ParCompactionManager* cm, HeapWord* beg_addr) const;
  size_t live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const;
  void update_live_words_in_range_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj, size_t result) const;

  // Each bit in the bitmap represents one unit of 'object granularity.' Objects
  // are double-word aligned in 32-bit VMs, but not in 64-bit VMs, so the 32-bit
@@ -183,7 +119,6 @@ private:
  HeapWord* _region_start;
  size_t _region_size;
  BitMapView _beg_bits;
  BitMapView _end_bits;
  PSVirtualSpace* _virtual_space;
  size_t _reserved_byte_size;
};
@@ -31,18 +31,15 @@
#include "utilities/bitMap.inline.hpp"

inline ParMarkBitMap::ParMarkBitMap():
  _region_start(nullptr), _region_size(0), _beg_bits(), _end_bits(), _virtual_space(nullptr), _reserved_byte_size(0)
  _region_start(nullptr), _region_size(0), _beg_bits(), _virtual_space(nullptr), _reserved_byte_size(0)
{ }

inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end) {
  _beg_bits.clear_range(beg, end);
  _end_bits.clear_range(beg, end);
}

inline ParMarkBitMap::idx_t ParMarkBitMap::bits_required(size_t words) {
  // Need two bits (one begin bit, one end bit) for each unit of 'object
  // granularity' in the heap.
  return words_to_bits(words * 2);
  return words_to_bits(words);
}

inline ParMarkBitMap::idx_t ParMarkBitMap::bits_required(MemRegion covered_region) {
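The bits_required() change above is the headline saving: one bit per unit of object granularity instead of two. As a worked example (assuming a 64-bit VM where one bit covers one 8-byte heap word, i.e. obj_granularity_shift() is zero):

    #include <cstddef>
    #include <cstdio>

    // Illustrative arithmetic only; the heap size is an arbitrary example.
    int main() {
      const size_t heap_bytes = 32ull << 30;        // 32 GiB heap
      const size_t heap_words = heap_bytes / 8;
      const size_t old_bytes  = heap_words * 2 / 8; // two bits per word
      const size_t new_bytes  = heap_words / 8;     // one bit per word
      printf("old: %zu MiB, new: %zu MiB\n",
             old_bytes >> 20, new_bytes >> 20);     // old: 1024 MiB, new: 512 MiB
    }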
@@ -65,16 +62,8 @@ inline size_t ParMarkBitMap::size() const {
  return _beg_bits.size();
}

inline bool ParMarkBitMap::is_obj_beg(idx_t bit) const {
  return _beg_bits.at(bit);
}

inline bool ParMarkBitMap::is_obj_end(idx_t bit) const {
  return _end_bits.at(bit);
}

inline bool ParMarkBitMap::is_marked(idx_t bit) const {
  return is_obj_beg(bit);
  return _beg_bits.at(bit);
}

inline bool ParMarkBitMap::is_marked(HeapWord* addr) const {
@@ -105,47 +94,12 @@ inline ParMarkBitMap::idx_t ParMarkBitMap::words_to_bits(size_t words) {
  return words >> obj_granularity_shift();
}

inline size_t ParMarkBitMap::obj_size(idx_t beg_bit, idx_t end_bit) const {
  DEBUG_ONLY(verify_bit(beg_bit);)
  DEBUG_ONLY(verify_bit(end_bit);)
  return bits_to_words(end_bit - beg_bit + 1);
inline bool ParMarkBitMap::mark_obj(HeapWord* addr) {
  return _beg_bits.par_set_bit(addr_to_bit(addr));
}

inline size_t ParMarkBitMap::obj_size(HeapWord* beg_addr, HeapWord* end_addr) const {
  DEBUG_ONLY(verify_addr(beg_addr);)
  DEBUG_ONLY(verify_addr(end_addr);)
  return pointer_delta(end_addr, beg_addr) + obj_granularity();
}

inline size_t ParMarkBitMap::obj_size(idx_t beg_bit) const {
  const idx_t end_bit = _end_bits.find_first_set_bit(beg_bit, size());
  assert(is_marked(beg_bit), "obj not marked");
  assert(end_bit < size(), "end bit missing");
  return obj_size(beg_bit, end_bit);
}

inline size_t ParMarkBitMap::obj_size(HeapWord* addr) const {
  return obj_size(addr_to_bit(addr));
}

inline ParMarkBitMap::IterationStatus ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                                                             HeapWord* range_beg,
                                                             HeapWord* range_end) const {
  return iterate(live_closure, addr_to_bit(range_beg), addr_to_bit(range_end));
}

inline ParMarkBitMap::IterationStatus ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                                                             ParMarkBitMapClosure* dead_closure,
                                                             HeapWord* range_beg,
                                                             HeapWord* range_end,
                                                             HeapWord* dead_range_end) const {
  return iterate(live_closure, dead_closure,
                 addr_to_bit(range_beg), addr_to_bit(range_end),
                 addr_to_bit(dead_range_end));
}

inline bool ParMarkBitMap::mark_obj(oop obj, size_t size) {
  return mark_obj(cast_from_oop<HeapWord*>(obj), size);
inline bool ParMarkBitMap::mark_obj(oop obj) {
  return mark_obj(cast_from_oop<HeapWord*>(obj));
}

inline ParMarkBitMap::idx_t ParMarkBitMap::addr_to_bit(HeapWord* addr) const {
@@ -168,10 +122,6 @@ inline ParMarkBitMap::idx_t ParMarkBitMap::find_obj_beg(idx_t beg, idx_t end) co
  return _beg_bits.find_first_set_bit_aligned_right(beg, end);
}

inline ParMarkBitMap::idx_t ParMarkBitMap::find_obj_end(idx_t beg, idx_t end) const {
  return _end_bits.find_first_set_bit_aligned_right(beg, end);
}

inline HeapWord* ParMarkBitMap::find_obj_beg(HeapWord* beg, HeapWord* end) const {
  const idx_t beg_bit = addr_to_bit(beg);
  const idx_t end_bit = addr_to_bit(end);
@@ -180,11 +130,10 @@ inline HeapWord* ParMarkBitMap::find_obj_beg(HeapWord* beg, HeapWord* end) const
  return bit_to_addr(res_bit);
}

inline HeapWord* ParMarkBitMap::find_obj_end(HeapWord* beg, HeapWord* end) const {
inline HeapWord* ParMarkBitMap::find_obj_beg_reverse(HeapWord* beg, HeapWord* end) const {
  const idx_t beg_bit = addr_to_bit(beg);
  const idx_t end_bit = addr_to_bit(end);
  const idx_t search_end = align_range_end(end_bit);
  const idx_t res_bit = MIN2(find_obj_end(beg_bit, search_end), end_bit);
  const idx_t res_bit = _beg_bits.find_last_set_bit_aligned_left(beg_bit, end_bit);
  return bit_to_addr(res_bit);
}

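find_obj_beg_reverse() above needs a right-to-left search primitive (find_last_set_bit_aligned_left) rather than the left-to-right search used elsewhere. The core of such a search, sketched for a single 64-bit word (a model using a GCC/Clang builtin, not the BitMap implementation):

    #include <cstddef>
    #include <cstdint>

    // Index of the last set bit in [0, end) of `word`, or `end` if none is set.
    inline size_t find_last_set_bit(uint64_t word, size_t end) {
      uint64_t masked = (end >= 64) ? word : (word & ((uint64_t(1) << end) - 1));
      if (masked == 0) return end;
      return 63 - __builtin_clzll(masked);  // position of the highest set bit
    }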
@@ -29,6 +29,7 @@
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
@@ -51,16 +52,16 @@ ParMarkBitMap* ParCompactionManager::_mark_bitmap = nullptr;
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = nullptr;
Monitor* ParCompactionManager::_shadow_region_monitor = nullptr;

ParCompactionManager::ParCompactionManager() {
PreservedMarksSet* ParCompactionManager::_preserved_marks_set = nullptr;

ParCompactionManager::ParCompactionManager(PreservedMarks* preserved_marks) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  reset_bitmap_query_cache();

  _deferred_obj_array = new (mtGC) GrowableArray<HeapWord*>(10, mtGC);
  _preserved_marks = preserved_marks;
  _marking_stats_cache = nullptr;
}

@@ -79,9 +80,12 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);

  _preserved_marks_set = new PreservedMarksSet(true);
  _preserved_marks_set->init(parallel_gc_threads);

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    _manager_array[i] = new ParCompactionManager(_preserved_marks_set->get(i));
    oop_task_queues()->register_queue(i, _manager_array[i]->oop_stack());
    _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
@@ -93,13 +97,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  _shadow_region_array = new (mtGC) GrowableArray<size_t >(10, mtGC);

  _shadow_region_monitor = new Monitor(Mutex::nosafepoint, "CompactionManager_lock");
}

void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
  for (uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

void ParCompactionManager::flush_all_string_dedup_requests() {
@@ -168,15 +166,6 @@ void ParCompactionManager::drain_region_stacks() {
  } while (!region_stack()->is_empty());
}

void ParCompactionManager::drain_deferred_objects() {
  while (!_deferred_obj_array->is_empty()) {
    HeapWord* addr = _deferred_obj_array->pop();
    assert(addr != nullptr, "expected a deferred object");
    PSParallelCompact::update_deferred_object(this, addr);
  }
  _deferred_obj_array->clear_and_deallocate();
}

size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
@@ -207,10 +196,6 @@ void ParCompactionManager::remove_all_shadow_regions() {
  _shadow_region_array->clear();
}

void ParCompactionManager::push_deferred_object(HeapWord* addr) {
  _deferred_obj_array->push(addr);
}

#ifdef ASSERT
void ParCompactionManager::verify_all_marking_stack_empty() {
  uint parallel_gc_threads = ParallelGCThreads;
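The new PreservedMarks plumbing above exists because forwarding information now lives in object headers: a mark word that carries real state (a lock, an identity hash) would be destroyed when a forwarding pointer is installed, so each worker saves such headers up front and restores them after compaction. A simplified standalone model of that idea (not the PreservedMarks implementation):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Obj { uint64_t mark; };

    class PreservedMarksModel {
      std::vector<std::pair<Obj*, uint64_t>> _saved;
    public:
      void push_if_necessary(Obj* o, uint64_t mark, uint64_t neutral_mark) {
        if (mark != neutral_mark) {     // only non-trivial headers need saving
          _saved.emplace_back(o, mark);
        }
      }
      void restore_all() {              // after compaction: undo the forwarding
        for (auto& [o, mark] : _saved) o->mark = mark;
        _saved.clear();
      }
    };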
@@ -26,6 +26,7 @@
#define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP

#include "gc/parallel/psParallelCompact.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/taskTerminator.hpp"
@@ -45,7 +46,7 @@ class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelScavengeRefProcProxyTask;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class UpdateDensePrefixAndCompactionTask;
  friend class FillDensePrefixAndCompactionTask;

 private:
  typedef OverflowTaskQueue<oop, mtGC> OopTaskQueue;
@@ -75,7 +76,8 @@ class ParCompactionManager : public CHeapObj<mtGC> {
  // type of TaskQueue.
  RegionTaskQueue _region_stack;

  GrowableArray<HeapWord*>* _deferred_obj_array;
  static PreservedMarksSet* _preserved_marks_set;
  PreservedMarks* _preserved_marks;

  static ParMarkBitMap* _mark_bitmap;

@@ -87,10 +89,6 @@ class ParCompactionManager : public CHeapObj<mtGC> {
  // See pop/push_shadow_region_mt_safe() below
  static Monitor* _shadow_region_monitor;

  HeapWord* _last_query_beg;
  oop _last_query_obj;
  size_t _last_query_ret;

  StringDedup::Requests _string_dedup_requests;

  static PSOldGen* old_gen() { return _old_gen; }
@@ -106,7 +104,7 @@ class ParCompactionManager : public CHeapObj<mtGC> {
  // objArray stack, otherwise returns false and the task is invalid.
  bool publish_or_pop_objarray_tasks(ObjArrayTask& task);

  ParCompactionManager();
  ParCompactionManager(PreservedMarks* preserved_marks);
  // Array of task queues. Needed by the task terminator.
  static RegionTaskQueueSet* region_task_queues() { return _region_task_queues; }
  OopTaskQueue* oop_stack() { return &_oop_stack; }
@@ -153,29 +151,10 @@ public:
    return next_shadow_region();
  }

  void push_deferred_object(HeapWord* addr);

  void reset_bitmap_query_cache() {
    _last_query_beg = nullptr;
    _last_query_obj = nullptr;
    _last_query_ret = 0;
  }

  void flush_string_dedup_requests() {
    _string_dedup_requests.flush();
  }

  // Bitmap query support, cache last query and result
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }

  static void reset_all_bitmap_query_caches();

  static void flush_all_string_dedup_requests();

  RegionTaskQueue* region_stack() { return &_region_stack; }
@@ -184,6 +163,9 @@ public:
  // Simply use the first compaction manager here.
  static ParCompactionManager* get_vmthread_cm() { return _manager_array[0]; }

  PreservedMarks* preserved_marks() const {
    return _preserved_marks;
  }

  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

@@ -208,13 +190,10 @@ public:

  // Process tasks remaining on any stack
  void drain_region_stacks();
  void drain_deferred_objects();

  void follow_contents(oop obj);
  void follow_array(objArrayOop array, int index);

  void update_contents(oop obj);

  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
@@ -157,13 +157,6 @@ inline void ParCompactionManager::follow_array(objArrayOop obj, int index) {
  }
}

inline void ParCompactionManager::update_contents(oop obj) {
  if (!obj->klass()->is_typeArray_klass()) {
    PCAdjustPointerClosure apc(this);
    obj->oop_iterate(&apc);
  }
}

inline void ParCompactionManager::follow_contents(oop obj) {
  assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
  PCIterateMarkAndPushClosure cl(this, PSParallelCompact::ref_processor());
(File diff suppressed because it is too large.)
@@ -215,17 +215,6 @@ public:
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  static const size_t Log2BlockSize;
  static const size_t BlockSize;
  static const size_t BlockSizeBytes;

  static const size_t BlockSizeOffsetMask;
  static const size_t BlockAddrOffsetMask;
  static const size_t BlockAddrMask;

  static const size_t BlocksPerRegion;
  static const size_t Log2BlocksPerRegion;

  class RegionData
  {
  public:
@@ -274,12 +263,6 @@ public:
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // Whether the block table for this region has been filled.
    inline bool blocks_filled() const;

    // Number of times the block table was filled.
    DEBUG_ONLY(inline size_t blocks_filled_count() const;)

    // Whether this region is available to be claimed, has been claimed, or has
    // been completed.
    //
@@ -298,7 +281,6 @@ public:
    void set_partial_obj_size(size_t words) {
      _partial_obj_size = (region_sz_t) words;
    }
    inline void set_blocks_filled();

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
@@ -356,13 +338,8 @@ public:
    HeapWord* _partial_obj_addr;
    region_sz_t _partial_obj_size;
    region_sz_t volatile _dc_and_los;
    bool volatile _blocks_filled;
    int volatile _shadow_state;

#ifdef ASSERT
    size_t _blocks_filled_count; // Number of block table fills.
#endif // #ifdef ASSERT

#ifdef ASSERT
  public:
    uint _pushed; // 0 until region is pushed onto a stack
@@ -370,21 +347,6 @@ public:
#endif
  };

  // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
  // holds an offset, which is the amount of live data in the Region to the left
  // of the first live object that starts in the Block.
  class BlockData
  {
  public:
    typedef unsigned short int blk_ofs_t;

    blk_ofs_t offset() const { return _offset; }
    void set_offset(size_t val) { _offset = (blk_ofs_t)val; }

  private:
    blk_ofs_t _offset;
  };

public:
  ParallelCompactData();
  bool initialize(MemRegion reserved_heap);
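The removed BlockData table above existed to turn "live words to the left" queries into cheap lookups: a destination was the region's destination, plus the block's offset within the region, plus a short bitmap count within the block. A simplified model of that (now deleted) lookup path (not HotSpot code; the types and the 8-byte word size are assumptions):

    #include <cstddef>

    struct RegionModel { const char* destination; };   // where the region's live data goes
    struct BlockModel  { size_t live_words_before; };  // live words left of this block's
                                                       // first live object, in its region

    inline const char* old_addr_to_new_addr(const RegionModel& r, const BlockModel& b,
                                            size_t live_words_in_block_before_obj) {
      return r.destination
           + (b.live_words_before + live_words_in_block_before_obj) * 8; // 8-byte words
    }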
@@ -396,9 +358,6 @@ public:
  inline RegionData* region(size_t region_idx) const;
  inline size_t region(const RegionData* const region_ptr) const;

  size_t block_count() const { return _block_count; }
  inline BlockData* block(size_t block_idx) const;

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n. Both arguments
  // beg and end must be region-aligned.
@@ -436,28 +395,12 @@ public:
  inline HeapWord* region_align_up(HeapWord* addr) const;
  inline bool is_region_aligned(HeapWord* addr) const;

  size_t addr_to_block_idx(const HeapWord* addr) const;
  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;

  inline HeapWord* block_align_down(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the location of the object after compaction.
  HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const;

  HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) const {
    return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
  }

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif // #ifdef ASSERT

private:
  bool initialize_block_data();
  bool initialize_region_data(size_t heap_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

@@ -470,10 +413,6 @@ private:
  size_t _reserved_byte_size;
  RegionData* _region_data;
  size_t _region_count;

  PSVirtualSpace* _block_vspace;
  BlockData* _block_data;
  size_t _block_count;
};

inline uint
@@ -488,31 +427,6 @@ ParallelCompactData::RegionData::destination_count() const
  return destination_count_raw() >> dc_shift;
}

inline bool
ParallelCompactData::RegionData::blocks_filled() const
{
  bool result = _blocks_filled;
  OrderAccess::acquire();
  return result;
}

#ifdef ASSERT
inline size_t
ParallelCompactData::RegionData::blocks_filled_count() const
{
  return _blocks_filled_count;
}
#endif // #ifdef ASSERT

inline void
ParallelCompactData::RegionData::set_blocks_filled()
{
  OrderAccess::release();
  _blocks_filled = true;
  // Debug builds count the number of times the table was filled.
  DEBUG_ONLY(Atomic::inc(&_blocks_filled_count));
}

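The removed accessors above paired a releasing store in set_blocks_filled() with an acquiring load in blocks_filled(), so that a thread seeing _blocks_filled as true also sees the block-table writes that preceded it. The same pairing in standard C++ terms (a model, not HotSpot's OrderAccess):

    #include <atomic>

    std::atomic<bool> blocks_filled{false};

    void writer_fills_table() {
      // ... fill the block table ...
      // Release: table writes above become visible before the flag does.
      blocks_filled.store(true, std::memory_order_release);
    }

    bool reader_sees_table() {
      // Acquire: if this returns true, the table writes are visible too.
      return blocks_filled.load(std::memory_order_acquire);
    }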
inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
@@ -602,12 +516,6 @@ ParallelCompactData::region(const RegionData* const region_ptr) const
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
@@ -667,28 +575,6 @@ ParallelCompactData::is_region_aligned(HeapWord* addr) const
  return (size_t(addr) & RegionAddrOffsetMask) == 0;
}

inline size_t
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
{
  assert(addr >= _heap_start, "bad addr");
  assert(addr <= _heap_end, "bad addr");
  return pointer_delta(addr, _heap_start) >> Log2BlockSize;
}

inline ParallelCompactData::BlockData*
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
{
  return block(addr_to_block_idx(addr));
}

inline HeapWord*
ParallelCompactData::block_align_down(HeapWord* addr) const
{
  assert(addr >= _heap_start, "bad addr");
  assert(addr < _heap_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & BlockAddrMask);
}

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
// do_addr() method.
//
@@ -774,25 +660,24 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation.
//
// There are four phases of the collection.
// A collection consists of the following phases.
//
// - marking phase
// - summary phase
// - summary phase (single-threaded)
// - forward (to new address) phase
// - adjust pointers phase
// - compacting phase
// - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//
// - mark all the live objects
// - calculate the destination region for each region, for better parallelism in the following phases
// - calculate the destination of each object at the end of the collection
// - adjust pointers to reflect new destination of objects
// - move the objects to their destination
// - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData. Each region is of a
// fixed size and typically will contain more than 1 object and may have parts
@@ -828,17 +713,12 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
// The forward (to new address) phase calculates the new address of each
// object and records the old-addr-to-new-addr association.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
// The adjust pointers phase remaps all pointers to reflect the new address of each object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_object().
// The compaction phase moves objects to their new location.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them. A
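Putting the rewritten comment above in code form, the driver now runs the phases in this order (a sketch with stub functions, not the real PSParallelCompact::invoke_no_policy()):

    // Stubs stand in for the PSParallelCompact methods named in the comment.
    static void marking_phase() {}        // mark all live objects
    static void summary_phase(bool) {}    // single-threaded: per-region destinations
    static void forward_to_new_addr() {}  // record old-addr -> new-addr per object
    static void adjust_pointers() {}      // rewrite references to new addresses
    static void compact() {}              // move objects region by region

    void full_gc_sketch(bool maximum_compaction) {
      marking_phase();
      summary_phase(maximum_compaction);
      forward_to_new_addr();
      adjust_pointers();
      compact();
      // clean up: update remaining references and reset per-GC state
    }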
@@ -869,39 +749,18 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// Environments (VEE 2019). ACM, New York, NY, USA, 108-121. DOI:
// https://doi.org/10.1145/3313808.3313820

class TaskQueue;

class PSParallelCompact : AllStatic {
public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;
  typedef ParallelCompactData::BlockData BlockData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

  struct UpdateDensePrefixTask : public CHeapObj<mtGC> {
    SpaceId _space_id;
    size_t _region_index_start;
    size_t _region_index_end;

    UpdateDensePrefixTask() :
      _space_id(SpaceId(0)),
      _region_index_start(0),
      _region_index_end(0) {}

    UpdateDensePrefixTask(SpaceId space_id,
                          size_t region_index_start,
                          size_t region_index_end) :
      _space_id(space_id),
      _region_index_start(region_index_start),
      _region_index_end(region_index_end) {}
  };

public:
public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
@@ -909,7 +768,6 @@ class PSParallelCompact : AllStatic {
    virtual bool do_object_b(oop p);
  };

  friend class RefProcTaskProxy;
  friend class PSParallelCompactTest;

private:
@@ -958,10 +816,11 @@ class PSParallelCompact : AllStatic {

  static void summary_phase(bool maximum_compaction);

  // Adjust addresses in roots. Does not adjust addresses in heap.
  static void adjust_roots();
  static void adjust_pointers();
  static void forward_to_new_addr();

  DEBUG_ONLY(static void write_block_fill_histogram();)
  static void verify_forward() NOT_DEBUG_RETURN;
  static void verify_filler_in_dense_prefix() NOT_DEBUG_RETURN;

  // Move objects to new locations.
  static void compact();
@@ -969,10 +828,6 @@ class PSParallelCompact : AllStatic {
  // Add available regions to the stack and draining tasks to the task queue.
  static void prepare_region_draining_tasks(uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
                                         uint parallel_gc_threads);

#ifndef PRODUCT
  // Print generic summary data
  static void print_generic_summary_data(ParallelCompactData& summary_data,
@@ -980,10 +835,23 @@ class PSParallelCompact : AllStatic {
                                         HeapWord* const end_addr);
#endif // #ifndef PRODUCT

  static void fill_range_in_dense_prefix(HeapWord* start, HeapWord* end);

public:
  static void fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers);

  static bool invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  template<typename Func>
  static void adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe);

  static void adjust_in_old_space(volatile uint* claim_counter);

  static void adjust_in_young_space(SpaceId id, volatile uint* claim_counter);

  static void adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counter);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations. This should be called during the VM initialization
@@ -1003,7 +871,7 @@ class PSParallelCompact : AllStatic {
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);

  template <class T> static inline void adjust_pointer(T* p, ParCompactionManager* cm);
  template <class T> static inline void adjust_pointer(T* p);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
@@ -1016,19 +884,6 @@ class PSParallelCompact : AllStatic {
  static inline HeapWord* dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Update a region in the dense prefix. For each live object
  // in the region, update its interior references. For each
  // dead object, fill it with deadwood. Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end. None of the
  // objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so
  // dead space can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

@@ -1056,6 +911,8 @@ class PSParallelCompact : AllStatic {
                                        size_t beg_region,
                                        HeapWord* end_addr);

  static HeapWord* partial_obj_end(HeapWord* region_start_addr);

  static void fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region);

@@ -1067,12 +924,6 @@ class PSParallelCompact : AllStatic {
  // _next_shadow_region field for each compaction manager
  static void initialize_shadow_regions(uint parallel_gc_threads);

  // Fill in the block table for the specified region.
  static void fill_blocks(size_t region_idx);

  // Update a single deferred object.
  static void update_deferred_object(ParCompactionManager* cm, HeapWord* addr);

  static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

@@ -1120,14 +971,10 @@ class MoveAndUpdateClosure: public ParMarkBitMapClosure {
  // return would_overflow.
  IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();
  // whichever is smaller, starting at source(). The start array is not
  // updated.
  void copy_partial_obj(size_t partial_obj_size);

  virtual void complete_region(ParCompactionManager* cm, HeapWord* dest_addr,
                               PSParallelCompact::RegionData* region_ptr);
@@ -1198,31 +1045,6 @@ MoveAndUpdateShadowClosure::MoveAndUpdateShadowClosure(ParMarkBitMap *bitmap,
  _offset = calculate_shadow_offset(region, shadow);
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
private:
  ObjectStartArray* const _start_array;

public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

class FillClosure: public ParMarkBitMapClosure {
public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id);

  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

private:
  ObjectStartArray* const _start_array;
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id);

#endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_HPP
@@ -78,8 +78,7 @@ inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord*
#endif // ASSERT

inline bool PSParallelCompact::mark_obj(oop obj) {
  const size_t obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
  if (mark_bitmap()->mark_obj(obj)) {
    ContinuationGCSupport::transform_stack_chunk(obj);
    return true;
  } else {
@@ -88,34 +87,22 @@ inline bool PSParallelCompact::mark_obj(oop obj) {
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

    oop new_obj = cast_to_oop(summary_data().calc_new_pointer(obj, cm));
    assert(new_obj != nullptr, "non-null address for live objects");
    // Is it actually relocated at all?
    if (new_obj != obj) {
      assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
             "should be in object space");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    if (!obj->is_forwarded()) {
      return;
    }
    oop new_obj = obj->forwardee();
    assert(new_obj != nullptr, "non-null address for live objects");
    assert(new_obj != obj, "inv");
    assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
           "should be in object space");
    RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
  }
}

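The new adjust_pointer() above no longer consults the summary data: the forward phase leaves the destination in each object's mark word, so adjusting a slot is just a forwardee() read. A simplified standalone model of header forwarding (the tag value is an assumption, not HotSpot's markWord encoding):

    #include <cstdint>

    struct ObjModel {
      uintptr_t mark;                     // low bits tag a forwarded header
      static constexpr uintptr_t forwarded_tag = 0x3;
      bool is_forwarded() const { return (mark & forwarded_tag) == forwarded_tag; }
      ObjModel* forwardee() const {
        return reinterpret_cast<ObjModel*>(mark & ~forwarded_tag);
      }
    };

    inline void adjust_slot(ObjModel** slot) {
      ObjModel* obj = *slot;
      if (obj != nullptr && obj->is_forwarded()) {
        *slot = obj->forwardee();         // unmoved objects keep their header
      }
    }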
class PCAdjustPointerClosure: public BasicOopIterateClosure {
public:
  PCAdjustPointerClosure(ParCompactionManager* cm) : _cm(cm) {
  }
  template <typename T> void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p, _cm); }
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
private:
  ParCompactionManager* _cm;
};

#endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP