8320864: Serial: Extract out Full GC related fields from ContiguousSpace

Reviewed-by: kbarrett, sjohanss

Parent: 176606d0cb
Commit: 07fce8eff2
src/hotspot/share/gc/serial/genMarkSweep.cpp

@@ -36,6 +36,7 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/generation.hpp"
 #include "gc/serial/genMarkSweep.hpp"
+#include "gc/serial/markSweep.inline.hpp"
 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 #include "gc/serial/serialHeap.hpp"
 #include "gc/shared/classUnloadingContext.hpp"

@@ -48,7 +49,7 @@
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.hpp"
+#include "gc/shared/space.inline.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/universe.hpp"

@@ -57,6 +58,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaThread.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/copy.hpp"

@@ -66,98 +68,281 @@
 #include "jvmci/jvmci.hpp"
 #endif
 
-void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-
-  SerialHeap* gch = SerialHeap::heap();
-#ifdef ASSERT
-  if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
-    assert(clear_all_softrefs, "Policy should have been checked earlier");
-  }
-#endif
-
-  gch->trace_heap_before_gc(_gc_tracer);
-
-  // Increment the invocation count
-  _total_invocations++;
-
-  // Capture used regions for each generation that will be
-  // subject to collection, so that card table adjustments can
-  // be made intelligently (see clear / invalidate further below).
-  gch->save_used_regions();
-
-  allocate_stacks();
-
-  mark_sweep_phase1(clear_all_softrefs);
-
-  mark_sweep_phase2();
-
-  // Don't add any more derived pointers during phase3
-#if COMPILER2_OR_JVMCI
-  assert(DerivedPointerTable::is_active(), "Sanity");
-  DerivedPointerTable::set_active(false);
-#endif
-
-  mark_sweep_phase3();
-
-  mark_sweep_phase4();
-
-  restore_marks();
-
-  // Set saved marks for allocation profiler (and other things? -- dld)
-  // (Should this be in general part?)
-  gch->save_marks();
-
-  deallocate_stacks();
-
-  MarkSweep::_string_dedup_requests->flush();
-
-  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
-  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
-
-  gch->prune_scavengable_nmethods();
-
-  // Update heap occupancy information which is used as
-  // input to soft ref clearing policy at the next gc.
-  Universe::heap()->update_capacity_and_used_at_gc();
-
-  // Signal that we have completed a visit to all live objects.
-  Universe::heap()->record_whole_heap_examined_timestamp();
-
-  gch->trace_heap_after_gc(_gc_tracer);
-}
-
-void GenMarkSweep::allocate_stacks() {
-  void* scratch = nullptr;
-  size_t num_words;
-  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
-  young_gen->contribute_scratch(scratch, num_words);
-
-  if (scratch != nullptr) {
-    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
-  } else {
-    _preserved_count_max = 0;
-  }
-
-  _preserved_marks = (PreservedMark*)scratch;
-  _preserved_count = 0;
-
-  _preserved_overflow_stack_set.init(1);
-}
-
-void GenMarkSweep::deallocate_stacks() {
-  if (_preserved_count_max != 0) {
-    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
-    young_gen->reset_scratch();
-  }
-
-  _preserved_overflow_stack_set.reclaim();
-  _marking_stack.clear();
-  _objarray_stack.clear(true);
-}
-
-void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
+class DeadSpacer : StackObj {
+  size_t _allowed_deadspace_words;
+  bool _active;
+  ContiguousSpace* _space;
+
+public:
+  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
+    size_t ratio = _space->allowed_dead_ratio();
+    _active = ratio > 0;
+
+    if (_active) {
+      // We allow some amount of garbage towards the bottom of the space, so
+      // we don't start compacting before there is a significant gain to be made.
+      // Occasionally, we want to ensure a full compaction, which is determined
+      // by the MarkSweepAlwaysCompactCount parameter.
+      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
+        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
+      } else {
+        _active = false;
+      }
+    }
+  }
+
+  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
+    if (!_active) {
+      return false;
+    }
+
+    size_t dead_length = pointer_delta(dead_end, dead_start);
+    if (_allowed_deadspace_words >= dead_length) {
+      _allowed_deadspace_words -= dead_length;
+      CollectedHeap::fill_with_object(dead_start, dead_length);
+      oop obj = cast_to_oop(dead_start);
+      // obj->set_mark(obj->mark().set_marked());
+
+      assert(dead_length == obj->size(), "bad filler object size");
+      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
+          p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);
+
+      return true;
+    } else {
+      _active = false;
+      return false;
+    }
+  }
+};
+
+// Implement the "compaction" part of the mark-compact GC algorithm.
+class Compacter {
+  // There are four spaces in total, but only the first three can be used after
+  // compact. IOW, old and eden/from must be enough for all live objs
+  static constexpr uint max_num_spaces = 4;
+
+  struct CompactionSpace {
+    ContiguousSpace* _space;
+    // Will be the new top after compaction is complete.
+    HeapWord* _compaction_top;
+    // The first dead word in this contiguous space. It's an optimization to
+    // skip large chunk of live objects at the beginning.
+    HeapWord* _first_dead;
+
+    void init(ContiguousSpace* space) {
+      _space = space;
+      _compaction_top = space->bottom();
+      _first_dead = nullptr;
+    }
+  };
+
+  CompactionSpace _spaces[max_num_spaces];
+  // The num of spaces to be compacted, i.e. containing live objs.
+  uint _num_spaces;
+
+  uint _index;
+
+  HeapWord* get_compaction_top(uint index) const {
+    return _spaces[index]._compaction_top;
+  }
+
+  HeapWord* get_first_dead(uint index) const {
+    return _spaces[index]._first_dead;
+  }
+
+  ContiguousSpace* get_space(uint index) const {
+    return _spaces[index]._space;
+  }
+
+  void record_first_dead(uint index, HeapWord* first_dead) {
+    assert(_spaces[index]._first_dead == nullptr, "should write only once");
+    _spaces[index]._first_dead = first_dead;
+  }
+
+  HeapWord* alloc(size_t words) {
+    while (true) {
+      if (words <= pointer_delta(_spaces[_index]._space->end(),
+                                 _spaces[_index]._compaction_top)) {
+        HeapWord* result = _spaces[_index]._compaction_top;
+        _spaces[_index]._compaction_top += words;
+        if (_index == 0) {
+          // old-gen requires BOT update
+          static_cast<TenuredSpace*>(_spaces[0]._space)->update_for_block(result, result + words);
+        }
+        return result;
+      }
+
+      // out-of-memory in this space
+      _index++;
+      assert(_index < max_num_spaces - 1, "the last space should not be used");
+    }
+  }
+
+  static void prefetch_read_scan(void* p) {
+    if (PrefetchScanIntervalInBytes >= 0) {
+      Prefetch::read(p, PrefetchScanIntervalInBytes);
+    }
+  }
+
+  static void prefetch_write_scan(void* p) {
+    if (PrefetchScanIntervalInBytes >= 0) {
+      Prefetch::write(p, PrefetchScanIntervalInBytes);
+    }
+  }
+
+  static void prefetch_write_copy(void* p) {
+    if (PrefetchCopyIntervalInBytes >= 0) {
+      Prefetch::write(p, PrefetchCopyIntervalInBytes);
+    }
+  }
+
+  static void forward_obj(oop obj, HeapWord* new_addr) {
+    prefetch_write_scan(obj);
+    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
+      obj->forward_to(cast_to_oop(new_addr));
+    } else {
+      assert(obj->is_gc_marked(), "inv");
+      // This obj will stay in-place. Fix the markword.
+      obj->init_mark();
+    }
+  }
+
+  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
+    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
+      prefetch_read_scan(i_addr);
+      oop obj = cast_to_oop(i_addr);
+      if (obj->is_gc_marked()) {
+        return i_addr;
+      }
+      i_addr += obj->size();
+    }
+    return end;
+  };
+
+  static size_t relocate(HeapWord* addr) {
+    // Prefetch source and destination
+    prefetch_read_scan(addr);
+
+    oop obj = cast_to_oop(addr);
+    oop new_obj = obj->forwardee();
+    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
+    assert(addr != new_addr, "inv");
+    prefetch_write_copy(new_addr);
+
+    size_t obj_size = obj->size();
+    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
+    new_obj->init_mark();
+
+    return obj_size;
+  }
+
+public:
+  explicit Compacter(SerialHeap* heap) {
+    // In this order so that heap is compacted towards old-gen.
+    _spaces[0].init(heap->old_gen()->space());
+    _spaces[1].init(heap->young_gen()->eden());
+    _spaces[2].init(heap->young_gen()->from());
+
+    bool is_promotion_failed = (heap->young_gen()->from()->next_compaction_space() != nullptr);
+    if (is_promotion_failed) {
+      _spaces[3].init(heap->young_gen()->to());
+      _num_spaces = 4;
+    } else {
+      _num_spaces = 3;
+    }
+    _index = 0;
+  }
+
+  void phase2_calculate_new_addr() {
+    for (uint i = 0; i < _num_spaces; ++i) {
+      ContiguousSpace* space = get_space(i);
+      HeapWord* cur_addr = space->bottom();
+      HeapWord* top = space->top();
+
+      bool record_first_dead_done = false;
+
+      DeadSpacer dead_spacer(space);
+
+      while (cur_addr < top) {
+        oop obj = cast_to_oop(cur_addr);
+        size_t obj_size = obj->size();
+        if (obj->is_gc_marked()) {
+          HeapWord* new_addr = alloc(obj_size);
+          forward_obj(obj, new_addr);
+          cur_addr += obj_size;
+        } else {
+          // Skipping the current known-unmarked obj
+          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
+          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
+            // Register space for the filler obj
+            alloc(pointer_delta(next_live_addr, cur_addr));
+          } else {
+            if (!record_first_dead_done) {
+              record_first_dead(i, cur_addr);
+              record_first_dead_done = true;
+            }
+            *(HeapWord**)cur_addr = next_live_addr;
+          }
+          cur_addr = next_live_addr;
+        }
+      }
+
+      if (!record_first_dead_done) {
+        record_first_dead(i, top);
+      }
+    }
+  }
+
+  void phase3_adjust_pointers() {
+    for (uint i = 0; i < _num_spaces; ++i) {
+      ContiguousSpace* space = get_space(i);
+      HeapWord* cur_addr = space->bottom();
+      HeapWord* const top = space->top();
+      HeapWord* const first_dead = get_first_dead(i);
+
+      while (cur_addr < top) {
+        prefetch_write_scan(cur_addr);
+        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
+          size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_addr));
+          cur_addr += size;
+        } else {
+          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
+          cur_addr = *(HeapWord**)cur_addr;
+        }
+      }
+    }
+  }
+
+  void phase4_compact() {
+    for (uint i = 0; i < _num_spaces; ++i) {
+      ContiguousSpace* space = get_space(i);
+      HeapWord* cur_addr = space->bottom();
+      HeapWord* top = space->top();
+
+      // Check if the first obj inside this space is forwarded.
+      if (!cast_to_oop(cur_addr)->is_forwarded()) {
+        // Jump over consecutive (in-place) live-objs-chunk
+        cur_addr = get_first_dead(i);
+      }
+
+      while (cur_addr < top) {
+        if (!cast_to_oop(cur_addr)->is_forwarded()) {
+          cur_addr = *(HeapWord**) cur_addr;
+          continue;
+        }
+        cur_addr += relocate(cur_addr);
+      }
+
+      // Reset top and unused memory
+      space->set_top(get_compaction_top(i));
+      if (ZapUnusedHeapArea) {
+        space->mangle_unused_area();
+      }
+    }
+  }
+};
+
+void GenMarkSweep::phase1_mark(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
   GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
 

@@ -241,54 +426,121 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   }
 }
 
-void GenMarkSweep::mark_sweep_phase2() {
-  // Now all live objects are marked, compute the new object addresses.
-  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
-
-  SerialHeap::heap()->prepare_for_compaction();
-}
-
-class GenAdjustPointersClosure: public SerialHeap::GenClosure {
- public:
-  void do_generation(Generation* gen) {
-    gen->adjust_pointers();
-  }
-};
-
-void GenMarkSweep::mark_sweep_phase3() {
-  SerialHeap* gch = SerialHeap::heap();
-  // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
-
-  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
-
-  CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
-  gch->process_roots(SerialHeap::SO_AllCodeCache,
-                     &adjust_pointer_closure,
-                     &adjust_cld_closure,
-                     &adjust_cld_closure,
-                     &code_closure);
-
-  gch->gen_process_weak_roots(&adjust_pointer_closure);
-
-  adjust_marks();
-  GenAdjustPointersClosure blk;
-  gch->generation_iterate(&blk, true);
-}
-
-class GenCompactClosure: public SerialHeap::GenClosure {
- public:
-  void do_generation(Generation* gen) {
-    gen->compact();
-  }
-};
-
-void GenMarkSweep::mark_sweep_phase4() {
-  // All pointers are now adjusted, move objects accordingly
-  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
-
-  GenCompactClosure blk;
-  SerialHeap::heap()->generation_iterate(&blk, true);
-}
+void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+
+  SerialHeap* gch = SerialHeap::heap();
+#ifdef ASSERT
+  if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
+
+  gch->trace_heap_before_gc(_gc_tracer);
+
+  // Increment the invocation count
+  _total_invocations++;
+
+  // Capture used regions for each generation that will be
+  // subject to collection, so that card table adjustments can
+  // be made intelligently (see clear / invalidate further below).
+  gch->save_used_regions();
+
+  allocate_stacks();
+
+  phase1_mark(clear_all_softrefs);
+
+  Compacter compacter{gch};
+
+  {
+    // Now all live objects are marked, compute the new object addresses.
+    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
+
+    compacter.phase2_calculate_new_addr();
+  }
+
+  // Don't add any more derived pointers during phase3
+#if COMPILER2_OR_JVMCI
+  assert(DerivedPointerTable::is_active(), "Sanity");
+  DerivedPointerTable::set_active(false);
+#endif
+
+  {
+    // Adjust the pointers to reflect the new locations
+    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
+
+    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
+
+    CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
+    gch->process_roots(SerialHeap::SO_AllCodeCache,
+                       &adjust_pointer_closure,
+                       &adjust_cld_closure,
+                       &adjust_cld_closure,
+                       &code_closure);
+
+    WeakProcessor::oops_do(&adjust_pointer_closure);
+
+    adjust_marks();
+    compacter.phase3_adjust_pointers();
+  }
+
+  {
+    // All pointers are now adjusted, move objects accordingly
+    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
+
+    compacter.phase4_compact();
+  }
+
+  restore_marks();
+
+  // Set saved marks for allocation profiler (and other things? -- dld)
+  // (Should this be in general part?)
+  gch->save_marks();
+
+  deallocate_stacks();
+
+  MarkSweep::_string_dedup_requests->flush();
+
+  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
+  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
+
+  gch->prune_scavengable_nmethods();
+
+  // Update heap occupancy information which is used as
+  // input to soft ref clearing policy at the next gc.
+  Universe::heap()->update_capacity_and_used_at_gc();
+
+  // Signal that we have completed a visit to all live objects.
+  Universe::heap()->record_whole_heap_examined_timestamp();
+
+  gch->trace_heap_after_gc(_gc_tracer);
+}
+
+void GenMarkSweep::allocate_stacks() {
+  void* scratch = nullptr;
+  size_t num_words;
+  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
+  young_gen->contribute_scratch(scratch, num_words);
+
+  if (scratch != nullptr) {
+    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
+  } else {
+    _preserved_count_max = 0;
+  }
+
+  _preserved_marks = (PreservedMark*)scratch;
+  _preserved_count = 0;
+
+  _preserved_overflow_stack_set.init(1);
+}
+
+void GenMarkSweep::deallocate_stacks() {
+  if (_preserved_count_max != 0) {
+    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
+    young_gen->reset_scratch();
+  }
+
+  _preserved_overflow_stack_set.reclaim();
+  _marking_stack.clear();
+  _objarray_stack.clear(true);
+}
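Note on the DeadSpacer budget above: the constructor caps how much garbage may be left un-compacted at `capacity * allowed_dead_ratio / 100` bytes, converted to heap words. Below is a self-contained sketch of that arithmetic with example numbers of our own choosing (a 512 MB space, a 5% dead ratio, 8-byte heap words) — illustrative assumptions, not values taken from HotSpot.

```cpp
#include <cstddef>
#include <cstdio>

// Standalone sketch of DeadSpacer's budget arithmetic; all values here are
// illustrative assumptions, not HotSpot defaults.
int main() {
  const size_t heap_word_size = 8;                  // 64-bit heap word (assumed)
  const size_t capacity_bytes = 512 * 1024 * 1024;  // example space capacity
  const size_t ratio          = 5;                  // example allowed_dead_ratio(), percent

  // Same shape as the constructor above: tolerated dead space, in words.
  size_t allowed_deadspace_words = (capacity_bytes * ratio / 100) / heap_word_size;
  printf("budget: %zu words (%zu bytes)\n",
         allowed_deadspace_words, allowed_deadspace_words * heap_word_size);

  // insert_deadspace() then spends the budget one dead run at a time and
  // deactivates itself on the first run that does not fit.
  size_t dead_run_words = 4096;  // hypothetical dead gap found in phase 2
  if (allowed_deadspace_words >= dead_run_words) {
    allowed_deadspace_words -= dead_run_words;
    printf("gap kept as filler; %zu words left\n", allowed_deadspace_words);
  } else {
    printf("budget exhausted; gap will be compacted away\n");
  }
  return 0;
}
```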
src/hotspot/share/gc/serial/genMarkSweep.hpp

@@ -32,15 +32,8 @@ class GenMarkSweep : public MarkSweep {
   static void invoke_at_safepoint(bool clear_all_softrefs);
 
  private:
 
   // Mark live objects
-  static void mark_sweep_phase1(bool clear_all_softrefs);
-  // Calculate new addresses
-  static void mark_sweep_phase2();
-  // Update pointers
-  static void mark_sweep_phase3();
-  // Move objects to new positions
-  static void mark_sweep_phase4();
+  static void phase1_mark(bool clear_all_softrefs);
 
   // Temporary data structures for traversal and storing/restoring marks
   static void allocate_stacks();
src/hotspot/share/gc/serial/generation.cpp

@@ -218,34 +218,3 @@ void Generation::object_iterate(ObjectClosure* cl) {
   GenerationObjIterateClosure blk(cl);
   space_iterate(&blk);
 }
-
-void Generation::prepare_for_compaction(CompactPoint* cp) {
-  // Generic implementation, can be specialized
-  ContiguousSpace* space = first_compaction_space();
-  while (space != nullptr) {
-    space->prepare_for_compaction(cp);
-    space = space->next_compaction_space();
-  }
-}
-
-class AdjustPointersClosure: public SpaceClosure {
- public:
-  void do_space(Space* sp) {
-    sp->adjust_pointers();
-  }
-};
-
-void Generation::adjust_pointers() {
-  // Note that this is done over all spaces, not just the compactible
-  // ones.
-  AdjustPointersClosure blk;
-  space_iterate(&blk, true);
-}
-
-void Generation::compact() {
-  ContiguousSpace* sp = first_compaction_space();
-  while (sp != nullptr) {
-    sp->compact();
-    sp = sp->next_compaction_space();
-  }
-}
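The deleted Generation helpers above walked a linked chain of compaction spaces via first_compaction_space() / next_compaction_space(); the new Compacter in genMarkSweep.cpp replaces that walk with a fixed array of spaces (it still consults next_compaction_space() only to detect promotion failure). A compressed sketch of the control-flow change, using stand-in types rather than HotSpot's classes:

```cpp
// Stand-in type for illustration; not HotSpot's Space hierarchy.
struct ToySpace {
  ToySpace* next = nullptr;
  void compact() { /* move live objects */ }
};

// Old shape (removed above): each generation walked a linked chain of its
// compaction spaces, and the full GC iterated generations via closures.
void generation_compact(ToySpace* first) {
  for (ToySpace* s = first; s != nullptr; s = s->next) {
    s->compact();
  }
}

// New shape: the Compacter owns a flat, fixed-size array of all spaces
// (old, eden, from, and optionally to) and loops over it directly, with no
// per-generation virtual dispatch.
void compacter_compact(ToySpace* const* spaces, unsigned num_spaces) {
  for (unsigned i = 0; i < num_spaces; i++) {
    spaces[i]->compact();
  }
}
```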
src/hotspot/share/gc/serial/generation.hpp

@@ -51,7 +51,7 @@
 class DefNewGeneration;
 class GCMemoryManager;
 class ContiguousSpace;
-class CompactPoint;
 class OopClosure;
 class GCStats;
 

@@ -286,13 +286,6 @@ class Generation: public CHeapObj<mtGC> {
   GCStats* gc_stats() const { return _gc_stats; }
   virtual void update_gc_stats(Generation* current_generation, bool full) {}
 
-  // Mark sweep support phase2
-  virtual void prepare_for_compaction(CompactPoint* cp);
-  // Mark sweep support phase3
-  virtual void adjust_pointers();
-  // Mark sweep support phase4
-  virtual void compact();
-
   // Accessing "marks".
 
   // This function gives a generation a chance to note a point between
src/hotspot/share/gc/serial/tenuredGeneration.hpp

@@ -70,8 +70,6 @@ class TenuredGeneration: public Generation {
   GenerationCounters* _gen_counters;
   CSpaceCounters* _space_counters;
 
-  // Accessing spaces
-  TenuredSpace* space() const { return _the_space; }
 
   // Attempt to expand the generation by "bytes". Expand by at a
   // minimum "expand_bytes". Return true if some amount (not

@@ -85,6 +83,8 @@
  public:
   virtual void compute_new_size();
 
+  TenuredSpace* space() const { return _the_space; }
+
   // Grow generation with specified size (returns false if unable to grow)
   bool grow_by(size_t bytes);
   // Grow generation to reserved size.
src/hotspot/share/gc/shared/genCollectedHeap.cpp

@@ -713,10 +713,6 @@ void GenCollectedHeap::process_roots(ScanningOption so,
   DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 }
 
-void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
-  WeakProcessor::oops_do(root_closure);
-}
-
 bool GenCollectedHeap::no_allocs_since_save_marks() {
   return _young_gen->no_allocs_since_save_marks() &&
          _old_gen->no_allocs_since_save_marks();

@@ -911,15 +907,6 @@ GenCollectedHeap* GenCollectedHeap::heap() {
   return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
 }
 
-#if INCLUDE_SERIALGC
-void GenCollectedHeap::prepare_for_compaction() {
-  // Start by compacting into same gen.
-  CompactPoint cp(_old_gen);
-  _old_gen->prepare_for_compaction(&cp);
-  _young_gen->prepare_for_compaction(&cp);
-}
-#endif // INCLUDE_SERIALGC
-
 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
   log_debug(gc, verify)("%s", _old_gen->name());
   _old_gen->verify();
src/hotspot/share/gc/shared/genCollectedHeap.hpp

@@ -292,11 +292,6 @@
                      CLDClosure* weak_cld_closure,
                      CodeBlobToOopClosure* code_roots);
 
-  // Apply "root_closure" to all the weak roots of the system.
-  // These include JNI weak roots, string table,
-  // and referents of reachable weak refs.
-  void gen_process_weak_roots(OopClosure* root_closure);
-
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
   // in other generations, it should call this method.

@@ -340,13 +335,6 @@
   HeapWord* mem_allocate_work(size_t size,
                               bool is_tlab);
 
-#if INCLUDE_SERIALGC
-  // For use by mark-sweep. As implemented, mark-sweep-compact is global
-  // in an essential way: compaction is performed across generations, by
-  // iterating over spaces.
-  void prepare_for_compaction();
-#endif
-
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
 
src/hotspot/share/gc/shared/space.cpp

@@ -35,19 +35,13 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
-#include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_SERIALGC
-#include "gc/serial/serialBlockOffsetTable.inline.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#endif
 
 ContiguousSpace::ContiguousSpace(): Space(),
-                                    _compaction_top(nullptr),
                                     _next_compaction_space(nullptr),
                                     _top(nullptr) {
   _mangler = new GenSpaceMangler(this);

@@ -59,8 +53,7 @@ ContiguousSpace::~ContiguousSpace() {
 
 void ContiguousSpace::initialize(MemRegion mr,
                                  bool clear_space,
-                                 bool mangle_space)
-{
+                                 bool mangle_space) {
   HeapWord* bottom = mr.start();
   HeapWord* end = mr.end();
   assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),

@@ -70,7 +63,6 @@ void ContiguousSpace::initialize(MemRegion mr,
   if (clear_space) {
     clear(mangle_space);
   }
-  set_compaction_top(bottom);
   _next_compaction_space = nullptr;
 }
 

@@ -80,7 +72,6 @@ void ContiguousSpace::clear(bool mangle_space) {
   if (ZapUnusedHeapArea && mangle_space) {
     mangle_unused_area();
   }
-  _compaction_top = bottom();
 }
 
 bool ContiguousSpace::is_free_block(const HeapWord* p) const {

@@ -115,230 +106,6 @@ void ContiguousSpace::mangle_unused_area_complete() {
 #endif // NOT_PRODUCT
 
-
-HeapWord* ContiguousSpace::forward(oop q, size_t size,
-                                   CompactPoint* cp, HeapWord* compact_top) {
-  // q is alive
-  // First check if we should switch compaction space
-  assert(this == cp->space, "'this' should be current compaction space.");
-  size_t compaction_max_size = pointer_delta(end(), compact_top);
-  while (size > compaction_max_size) {
-    // switch to next compaction space
-    cp->space->set_compaction_top(compact_top);
-    cp->space = cp->space->next_compaction_space();
-    if (cp->space == nullptr) {
-      cp->gen = GenCollectedHeap::heap()->young_gen();
-      assert(cp->gen != nullptr, "compaction must succeed");
-      cp->space = cp->gen->first_compaction_space();
-      assert(cp->space != nullptr, "generation must have a first compaction space");
-    }
-    compact_top = cp->space->bottom();
-    cp->space->set_compaction_top(compact_top);
-    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
-  }
-
-  // store the forwarding pointer into the mark word
-  if (cast_from_oop<HeapWord*>(q) != compact_top) {
-    q->forward_to(cast_to_oop(compact_top));
-    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
-  } else {
-    // if the object isn't moving we can just set the mark to the default
-    // mark and handle it specially later on.
-    q->init_mark();
-    assert(!q->is_forwarded(), "should not be forwarded");
-  }
-
-  compact_top += size;
-
-  // We need to update the offset table so that the beginnings of objects can be
-  // found during scavenge. Note that we are updating the offset table based on
-  // where the object will be once the compaction phase finishes.
-  cp->space->update_for_block(compact_top - size, compact_top);
-  return compact_top;
-}
-
-#if INCLUDE_SERIALGC
-
-void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  // Compute the new addresses for the live objects and store it in the mark
-  // Used by universe::mark_sweep_phase2()
-
-  // We're sure to be here before any objects are compacted into this
-  // space, so this is a good time to initialize this:
-  set_compaction_top(bottom());
-
-  if (cp->space == nullptr) {
-    assert(cp->gen != nullptr, "need a generation");
-    assert(cp->gen->first_compaction_space() == this, "just checking");
-    cp->space = cp->gen->first_compaction_space();
-    cp->space->set_compaction_top(cp->space->bottom());
-  }
-
-  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.
-
-  DeadSpacer dead_spacer(this);
-
-  HeapWord* end_of_live = bottom();  // One byte beyond the last byte of the last live object.
-  HeapWord* first_dead = nullptr;    // The first dead object.
-
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  HeapWord* cur_obj = bottom();
-  HeapWord* scan_limit = top();
-
-  while (cur_obj < scan_limit) {
-    if (cast_to_oop(cur_obj)->is_gc_marked()) {
-      // prefetch beyond cur_obj
-      Prefetch::write(cur_obj, interval);
-      size_t size = cast_to_oop(cur_obj)->size();
-      compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top);
-      cur_obj += size;
-      end_of_live = cur_obj;
-    } else {
-      // run over all the contiguous dead objects
-      HeapWord* end = cur_obj;
-      do {
-        // prefetch beyond end
-        Prefetch::write(end, interval);
-        end += cast_to_oop(end)->size();
-      } while (end < scan_limit && !cast_to_oop(end)->is_gc_marked());
-
-      // see if we might want to pretend this object is alive so that
-      // we don't have to compact quite as often.
-      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
-        oop obj = cast_to_oop(cur_obj);
-        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
-        end_of_live = end;
-      } else {
-        // otherwise, it really is a free region.
-
-        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
-        *(HeapWord**)cur_obj = end;
-
-        // see if this is the first dead region.
-        if (first_dead == nullptr) {
-          first_dead = cur_obj;
-        }
-      }
-
-      // move on to the next object
-      cur_obj = end;
-    }
-  }
-
-  assert(cur_obj == scan_limit, "just checking");
-  _end_of_live = end_of_live;
-  if (first_dead != nullptr) {
-    _first_dead = first_dead;
-  } else {
-    _first_dead = end_of_live;
-  }
-
-  // save the compaction_top of the compaction space.
-  cp->space->set_compaction_top(compact_top);
-}
-
-void ContiguousSpace::adjust_pointers() {
-  // Check first is there is any work to do.
-  if (used() == 0) {
-    return; // Nothing to do.
-  }
-
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  HeapWord* cur_obj = bottom();
-  HeapWord* const end_of_live = _end_of_live;  // Established by prepare_for_compaction().
-  HeapWord* const first_dead = _first_dead;    // Established by prepare_for_compaction().
-
-  assert(first_dead <= end_of_live, "Stands to reason, no?");
-
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  debug_only(HeapWord* prev_obj = nullptr);
-  while (cur_obj < end_of_live) {
-    Prefetch::write(cur_obj, interval);
-    if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
-      // cur_obj is alive
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(cast_to_oop(cur_obj));
-      debug_only(prev_obj = cur_obj);
-      cur_obj += size;
-    } else {
-      debug_only(prev_obj = cur_obj);
-      // cur_obj is not a live object, instead it points at the next live object
-      cur_obj = *(HeapWord**)cur_obj;
-      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
-    }
-  }
-
-  assert(cur_obj == end_of_live, "just checking");
-}
-
-void ContiguousSpace::compact() {
-  // Copy all live objects to their new location
-  // Used by MarkSweep::mark_sweep_phase4()
-
-  verify_up_to_first_dead(this);
-
-  HeapWord* const start = bottom();
-  HeapWord* const end_of_live = _end_of_live;
-
-  assert(_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(_first_dead), p2i(end_of_live));
-  if (_first_dead == end_of_live && (start == end_of_live || !cast_to_oop(start)->is_gc_marked())) {
-    // Nothing to compact. The space is either empty or all live object should be left in place.
-    clear_empty_region(this);
-    return;
-  }
-
-  const intx scan_interval = PrefetchScanIntervalInBytes;
-  const intx copy_interval = PrefetchCopyIntervalInBytes;
-
-  assert(start < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(start), p2i(end_of_live));
-  HeapWord* cur_obj = start;
-  if (_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
-    // All object before _first_dead can be skipped. They should not be moved.
-    // A pointer to the first live object is stored at the memory location for _first_dead.
-    cur_obj = *(HeapWord**)(_first_dead);
-  }
-
-  debug_only(HeapWord* prev_obj = nullptr);
-  while (cur_obj < end_of_live) {
-    if (!cast_to_oop(cur_obj)->is_forwarded()) {
-      debug_only(prev_obj = cur_obj);
-      // The first word of the dead object contains a pointer to the next live object or end of space.
-      cur_obj = *(HeapWord**)cur_obj;
-      assert(cur_obj > prev_obj, "we should be moving forward through memory");
-    } else {
-      // prefetch beyond q
-      Prefetch::read(cur_obj, scan_interval);
-
-      // size and destination
-      size_t size = cast_to_oop(cur_obj)->size();
-      HeapWord* compaction_top = cast_from_oop<HeapWord*>(cast_to_oop(cur_obj)->forwardee());
-
-      // prefetch beyond compaction_top
-      Prefetch::write(compaction_top, copy_interval);
-
-      // copy object and reinit its mark
-      assert(cur_obj != compaction_top, "everything in this pass should be moving");
-      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
-      oop new_obj = cast_to_oop(compaction_top);
-
-      ContinuationGCSupport::transform_stack_chunk(new_obj);
-
-      new_obj->init_mark();
-      assert(new_obj->klass() != nullptr, "should have a class");
-
-      debug_only(prev_obj = cur_obj);
-      cur_obj += size;
-    }
-  }
-
-  clear_empty_region(this);
-}
-
-#endif // INCLUDE_SERIALGC
-
 void Space::print_short() const { print_short_on(tty); }
 
 void Space::print_short_on(outputStream* st) const {

@@ -481,10 +248,6 @@ HeapWord* ContiguousSpace::par_allocate(size_t size) {
 }
 
 #if INCLUDE_SERIALGC
-void TenuredSpace::update_for_block(HeapWord* start, HeapWord* end) {
-  _offsets.update_for_block(start, end);
-}
-
 HeapWord* TenuredSpace::block_start_const(const void* addr) const {
   HeapWord* cur_block = _offsets.block_start_reaching_into_card(addr);
 
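The removed prepare_for_compaction() / adjust_pointers() / compact() trio above relied on an encoding that survives in the new Compacter: the first word of a dead run is overwritten with the address of the next live object, so phases 3 and 4 can jump over garbage in one step instead of walking it object by object. A self-contained illustration — a plain array stands in for the heap, and the names are ours:

```cpp
#include <cassert>
#include <cstdint>

using HeapWord = uintptr_t;

int main() {
  HeapWord heap[16] = {0};
  HeapWord* dead_start = &heap[4];  // hypothetical start of a dead run
  HeapWord* next_live  = &heap[9];  // hypothetical next marked object

  // Phase 2 stores the skip pointer in the dead memory itself:
  *reinterpret_cast<HeapWord**>(dead_start) = next_live;

  // Phases 3 and 4 read it back to leap over the whole dead run at once:
  HeapWord* cur = dead_start;
  cur = *reinterpret_cast<HeapWord**>(cur);
  assert(cur > dead_start);  // "forward progress", as the removed assert put it
  assert(cur == next_live);
  return 0;
}
```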
src/hotspot/share/gc/shared/space.hpp

@@ -184,29 +184,12 @@ class Space: public CHeapObj<mtGC> {
   // Allocation (return null if full). Enforces mutual exclusion internally.
   virtual HeapWord* par_allocate(size_t word_size) = 0;
 
-#if INCLUDE_SERIALGC
-  // Mark-sweep-compact support: all spaces can update pointers to objects
-  // moving as a part of compaction.
-  virtual void adjust_pointers() = 0;
-#endif
-
   void print() const;
   virtual void print_on(outputStream* st) const;
   void print_short() const;
   void print_short_on(outputStream* st) const;
 };
 
-// A structure to represent a point at which objects are being copied
-// during compaction.
-class CompactPoint : public StackObj {
-public:
-  Generation* gen;
-  ContiguousSpace* space;
-
-  CompactPoint(Generation* g = nullptr) :
-    gen(g), space(nullptr) {}
-};
-
 class GenSpaceMangler;
 
 // A space in which the free area is contiguous. It therefore supports

@@ -215,26 +198,13 @@ class ContiguousSpace: public Space {
   friend class VMStructs;
 
  private:
-  HeapWord* _compaction_top;
   ContiguousSpace* _next_compaction_space;
 
-  static inline void verify_up_to_first_dead(ContiguousSpace* space) NOT_DEBUG_RETURN;
-
-  static inline void clear_empty_region(ContiguousSpace* space);
-
  protected:
   HeapWord* _top;
   // A helper for mangling the unused area of the space in debug builds.
   GenSpaceMangler* _mangler;
 
-  // Used during compaction.
-  HeapWord* _first_dead;
-  HeapWord* _end_of_live;
-
-  // This the function to invoke when an allocation of an object covering
-  // "start" to "end" occurs to update other internal data structures.
-  virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }
-
   GenSpaceMangler* mangler() { return _mangler; }
 
   // Allocation helpers (return null if full).

@@ -254,23 +224,13 @@
 
   // The "clear" method must be called on a region that may have
   // had allocation performed in it, but is now to be considered empty.
-  virtual void clear(bool mangle_space);
+  void clear(bool mangle_space);
 
-  // Used temporarily during a compaction phase to hold the value
-  // top should have when compaction is complete.
-  HeapWord* compaction_top() const { return _compaction_top; }
-
-  void set_compaction_top(HeapWord* value) {
-    assert(value == nullptr || (value >= bottom() && value <= end()),
-      "should point inside space");
-    _compaction_top = value;
-  }
-
   // Returns the next space (in the current generation) to be compacted in
   // the global compaction order. Also is used to select the next
   // space into which to compact.
 
-  virtual ContiguousSpace* next_compaction_space() const {
+  ContiguousSpace* next_compaction_space() const {
     return _next_compaction_space;
   }
 

@@ -278,42 +238,10 @@
     _next_compaction_space = csp;
   }
 
-#if INCLUDE_SERIALGC
-  // MarkSweep support phase2
-
-  // Start the process of compaction of the current space: compute
-  // post-compaction addresses, and insert forwarding pointers. The fields
-  // "cp->gen" and "cp->compaction_space" are the generation and space into
-  // which we are currently compacting. This call updates "cp" as necessary,
-  // and leaves the "compaction_top" of the final value of
-  // "cp->compaction_space" up-to-date. Offset tables may be updated in
-  // this phase as if the final copy had occurred; if so, "cp->threshold"
-  // indicates when the next such action should be taken.
-  void prepare_for_compaction(CompactPoint* cp);
-  // MarkSweep support phase3
-  void adjust_pointers() override;
-  // MarkSweep support phase4
-  virtual void compact();
-#endif // INCLUDE_SERIALGC
-
   // The maximum percentage of objects that can be dead in the compacted
   // live part of a compacted space ("deadwood" support.)
   virtual size_t allowed_dead_ratio() const { return 0; };
 
-  // "q" is an object of the given "size" that should be forwarded;
-  // "cp" names the generation ("gen") and containing "this" (which must
-  // also equal "cp->space"). "compact_top" is where in "this" the
-  // next object should be forwarded to. If there is room in "this" for
-  // the object, insert an appropriate forwarding pointer in "q".
-  // If not, go to the next compaction space (there must
-  // be one, since compaction must succeed -- we go to the first space of
-  // the previous generation if necessary, updating "cp"), reset compact_top
-  // and then forward. In either case, returns the new value of "compact_top".
-  // Invokes the "update_for_block" function of the then-current compaction
-  // space.
-  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
-                            HeapWord* compact_top);
-
   // Accessors
   HeapWord* top() const { return _top; }
   void set_top(HeapWord* value) { _top = value; }

@@ -359,12 +287,6 @@
   // Iteration
   void object_iterate(ObjectClosure* blk) override;
 
-  // Compaction support
-  void reset_after_compaction() {
-    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
-    set_top(compaction_top());
-  }
-
   // Apply "blk->do_oop" to the addresses of all reference fields in objects
   // starting with the _saved_mark_word, which was noted during a generation's
   // save_marks and is required to denote the head of an object.

@@ -419,8 +341,7 @@ class TenuredSpace: public ContiguousSpace {
   inline HeapWord* allocate(size_t word_size) override;
   inline HeapWord* par_allocate(size_t word_size) override;
 
-  // MarkSweep support phase3
-  void update_for_block(HeapWord* start, HeapWord* end) override;
+  inline void update_for_block(HeapWord* start, HeapWord* end);
 
   void print_on(outputStream* st) const override;
 };
src/hotspot/share/gc/shared/space.inline.hpp

@@ -27,17 +27,12 @@
 
 #include "gc/shared/space.hpp"
 
-#include "gc/serial/generation.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
-#if INCLUDE_SERIALGC
-#include "gc/serial/serialBlockOffsetTable.inline.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#endif
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);

@@ -60,90 +55,8 @@ inline HeapWord* TenuredSpace::par_allocate(size_t size) {
   return res;
 }
 
-class DeadSpacer : StackObj {
-  size_t _allowed_deadspace_words;
-  bool _active;
-  ContiguousSpace* _space;
-
-public:
-  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
-    size_t ratio = _space->allowed_dead_ratio();
-    _active = ratio > 0;
-
-    if (_active) {
-      assert(!UseG1GC, "G1 should not be using dead space");
-
-      // We allow some amount of garbage towards the bottom of the space, so
-      // we don't start compacting before there is a significant gain to be made.
-      // Occasionally, we want to ensure a full compaction, which is determined
-      // by the MarkSweepAlwaysCompactCount parameter.
-      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
-        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
-      } else {
-        _active = false;
-      }
-    }
-  }
-
-  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
-    if (!_active) {
-      return false;
-    }
-
-    size_t dead_length = pointer_delta(dead_end, dead_start);
-    if (_allowed_deadspace_words >= dead_length) {
-      _allowed_deadspace_words -= dead_length;
-      CollectedHeap::fill_with_object(dead_start, dead_length);
-      oop obj = cast_to_oop(dead_start);
-      obj->set_mark(obj->mark().set_marked());
-
-      assert(dead_length == obj->size(), "bad filler object size");
-      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
-          p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);
-
-      return true;
-    } else {
-      _active = false;
-      return false;
-    }
-  }
-};
-
-#ifdef ASSERT
-inline void ContiguousSpace::verify_up_to_first_dead(ContiguousSpace* space) {
-  HeapWord* cur_obj = space->bottom();
-
-  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
-    // we have a chunk of the space which hasn't moved and we've reinitialized
-    // the mark word during the previous pass, so we can't use is_gc_marked for
-    // the traversal.
-    HeapWord* prev_obj = nullptr;
-
-    while (cur_obj < space->_first_dead) {
-      size_t size = cast_to_oop(cur_obj)->size();
-      assert(!cast_to_oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
-      prev_obj = cur_obj;
-      cur_obj += size;
-    }
-  }
-}
-#endif
-
-inline void ContiguousSpace::clear_empty_region(ContiguousSpace* space) {
-  // Let's remember if we were empty before we did the compaction.
-  bool was_empty = space->used_region().is_empty();
-  // Reset space after compaction is complete
-  space->reset_after_compaction();
-  // We do this clear, below, since it has overloaded meanings for some
-  // space subtypes. For example, TenuredSpace's that were
-  // compacted into will have had their offset table thresholds updated
-  // continuously, but those that weren't need to have their thresholds
-  // re-initialized. Also mangles unused area for debugging.
-  if (space->used_region().is_empty()) {
-    if (!was_empty) space->clear(SpaceDecorator::Mangle);
-  } else {
-    if (ZapUnusedHeapArea) space->mangle_unused_area();
-  }
+inline void TenuredSpace::update_for_block(HeapWord* start, HeapWord* end) {
+  _offsets.update_for_block(start, end);
 }
 
 #endif // INCLUDE_SERIALGC
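With TenuredSpace::update_for_block() now a plain inline forwarder (no longer a virtual override) to the block offset table, it is worth recalling what that table does: card scanning must find the object that reaches into any given card, so every block laid down in old-gen records how far back its start lies. A toy model with invented sizes follows; HotSpot's SerialBlockOffsetTable is more involved than this sketch.

```cpp
#include <cstddef>
#include <cstdint>

// Toy block-offset table; sizes and layout are assumptions for illustration.
static const size_t kCardBytes = 512;        // assumed card size
static const size_t kHeapBytes = 64 * 1024;  // toy heap size

struct ToyBlockOffsetTable {
  uint8_t* base = nullptr;  // toy heap bottom, set by the owner before use
  uint32_t back_offset_bytes[kHeapBytes / kCardBytes] = {};

  // For every card boundary the block [start, end) spills across, remember
  // the distance back to the block's start.
  void update_for_block(uint8_t* start, uint8_t* end) {
    size_t first_card = ((size_t)(start - base) + kCardBytes - 1) / kCardBytes;
    for (size_t card = first_card;
         card * kCardBytes < (size_t)(end - base);
         card++) {
      back_offset_bytes[card] = (uint32_t)(base + card * kCardBytes - start);
    }
  }

  // Card scanning: the start of the block reaching into `card`.
  uint8_t* block_start(size_t card) const {
    return base + card * kCardBytes - back_offset_bytes[card];
  }
};
```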
src/hotspot/share/gc/shared/vmStructs_gc.hpp

@@ -99,10 +99,6 @@
   nonstatic_field(CollectedHeap, _is_gc_active, bool) \
   nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
   \
-  nonstatic_field(ContiguousSpace, _compaction_top, HeapWord*) \
-  nonstatic_field(ContiguousSpace, _first_dead, HeapWord*) \
-  nonstatic_field(ContiguousSpace, _end_of_live, HeapWord*) \
-  \
   nonstatic_field(ContiguousSpace, _top, HeapWord*) \
   nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
   \