8289822: G1: Make concurrent mark code owner of TAMSes

Reviewed-by: ayang, iwalulya

parent: 5b6b514441
commit: f1c69ccadb
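Before this change every HeapRegion carried its own _top_at_mark_start (TAMS) word, and the rest of G1 read and wrote it through the region. The patch below moves that word into a per-region array owned by G1ConcurrentMark, so updating, resetting and querying TAMS always goes through the marking code, and HeapRegion loses the field together with its accessors. A minimal standalone sketch of the resulting interface (not JDK code; Region, MarkModel and kMaxRegions are invented stand-ins for HeapRegion, G1ConcurrentMark and max_reserved_regions()):

    #include <cassert>
    #include <cstddef>

    typedef char* HeapWord;

    struct Region {
      unsigned index;   // stand-in for HeapRegion::hrm_index()
      HeapWord bottom;  // stand-in for HeapRegion::bottom()
      HeapWord top;     // stand-in for HeapRegion::top()
    };

    class MarkModel {
      static const size_t kMaxRegions = 1024;
      // One TAMS slot per region, owned by the marking code rather than the region.
      HeapWord volatile _top_at_mark_starts[kMaxRegions];

    public:
      MarkModel() {
        for (size_t i = 0; i < kMaxRegions; i++) {
          _top_at_mark_starts[i] = nullptr;
        }
      }

      // Marking starts: everything from bottom up to the current top must be traced.
      void update_top_at_mark_start(const Region& r) {
        assert(r.index < kMaxRegions && "region index out of bounds");
        _top_at_mark_starts[r.index] = r.top;
      }

      // Region reclaimed or bitmap cleared: TAMS == bottom means nothing was marked.
      void reset_top_at_mark_start(const Region& r) {
        _top_at_mark_starts[r.index] = r.bottom;
      }

      HeapWord top_at_mark_start(const Region& r) const {
        return _top_at_mark_starts[r.index];
      }

      // Objects at or above TAMS were allocated after marking began: implicitly live.
      bool allocated_since_mark_start(const Region& r, HeapWord addr) const {
        return addr >= top_at_mark_start(r);
      }
    };

The hunks below apply this pattern mechanically: each r->top_at_mark_start() call site becomes cm->top_at_mark_start(r) (or plain top_at_mark_start(r) inside G1ConcurrentMark itself), and G1CMIsAliveClosure switches from holding a G1CollectedHeap* to a G1ConcurrentMark*.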
@@ -1174,7 +1174,7 @@ G1CollectedHeap::G1CollectedHeap() :
   _is_alive_closure_stw(this),
   _is_subject_to_discovery_stw(this),
   _ref_processor_cm(nullptr),
-  _is_alive_closure_cm(this),
+  _is_alive_closure_cm(),
   _is_subject_to_discovery_cm(this),
   _region_attr() {
@@ -1505,6 +1505,7 @@ void G1CollectedHeap::ref_processing_init() {
   // * Discovery is atomic - i.e. not concurrent.
   // * Reference discovery will not need a barrier.

+  _is_alive_closure_cm.initialize(concurrent_mark());
   // Concurrent Mark ref processor
   _ref_processor_cm =
     new ReferenceProcessor(&_is_subject_to_discovery_cm,
@@ -253,9 +253,10 @@ public:

   virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
+    G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
     _st->print_cr(" " HR_FORMAT ", TAMS: " PTR_FORMAT " PB: " PTR_FORMAT ", age: %4d",
                   HR_FORMAT_PARAMS(r),
-                  p2i(r->top_at_mark_start()),
+                  p2i(cm->top_at_mark_start(r)),
                   p2i(r->parsable_bottom()),
                   r->has_surv_rate_group() ? checked_cast<int>(r->age_in_surv_rate_group()) : -1);
     return false;
@@ -78,6 +78,18 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/powerOfTwo.hpp"

+G1CMIsAliveClosure::G1CMIsAliveClosure() : _cm(nullptr) { }
+
+G1CMIsAliveClosure::G1CMIsAliveClosure(G1ConcurrentMark* cm) : _cm(cm) {
+  assert(cm != nullptr, "must be");
+}
+
+void G1CMIsAliveClosure::initialize(G1ConcurrentMark* cm) {
+  assert(cm != nullptr, "must be");
+  assert(_cm == nullptr, "double initialize");
+  _cm = cm;
+}
+
 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
   assert(addr < _cm->finger(), "invariant");
   assert(addr >= _task->finger(), "invariant");
@@ -502,6 +514,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
   _max_concurrent_workers(0),

   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_reserved_regions(), mtGC)),
+  _top_at_mark_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_reserved_regions(), mtGC)),
   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_reserved_regions(), mtGC)),
   _needs_remembered_set_rebuild(false)
 {
@@ -648,6 +661,7 @@ void G1ConcurrentMark::reset_at_marking_complete() {
 }

 G1ConcurrentMark::~G1ConcurrentMark() {
+  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_mark_starts);
   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
   // The G1ConcurrentMark instance is never freed.
@@ -693,7 +707,7 @@ private:
         assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits");
         return r->bottom();
       }
-      assert(_bitmap->get_next_marked_addr(r->top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above tams");
+      assert(_bitmap->get_next_marked_addr(_cm->top_at_mark_start(r), r->end()) == r->end(), "Should not have marked bits above tams");
     }
     return r->end();
   }
@@ -737,7 +751,7 @@ private:
     }
     assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

-    r->reset_top_at_mark_start();
+    _cm->reset_top_at_mark_start(r);

     return false;
   }
@@ -849,9 +863,15 @@ void G1PreConcurrentStartTask::ResetMarkingStateTask::do_work(uint worker_id) {
 }

 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
+  G1ConcurrentMark* _cm;
+
 public:
+  NoteStartOfMarkHRClosure() : HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { }
+
   bool do_heap_region(HeapRegion* r) override {
     r->note_start_of_marking();
+    if (r->is_old_or_humongous() && !r->is_collection_set_candidate()) {
+      _cm->update_top_at_mark_start(r);
+    }
     return false;
   }
 };
@@ -1015,9 +1035,9 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id)
 #ifdef ASSERT
   HeapWord* last = region->last();
   HeapRegion* hr = _g1h->heap_region_containing(last);
-  assert(hr->is_old() || hr->top_at_mark_start() == hr->bottom(),
+  assert(hr->is_old() || top_at_mark_start(hr) == hr->bottom(),
          "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
-  assert(hr->top_at_mark_start() == region->start(),
+  assert(top_at_mark_start(hr) == region->start(),
          "MemRegion start should be equal to TAMS");
 #endif

@@ -1079,11 +1099,11 @@ bool G1ConcurrentMark::wait_until_root_region_scan_finished() {
 }

 void G1ConcurrentMark::add_root_region(HeapRegion* r) {
-  root_regions()->add(r->top_at_mark_start(), r->top());
+  root_regions()->add(top_at_mark_start(r), r->top());
 }

 bool G1ConcurrentMark::is_root_region(HeapRegion* r) {
-  return root_regions()->contains(MemRegion(r->top_at_mark_start(), r->top()));
+  return root_regions()->contains(MemRegion(top_at_mark_start(r), r->top()));
 }

 void G1ConcurrentMark::root_region_scan_abort_and_wait() {
@@ -1250,7 +1270,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
       if (hr->is_starts_humongous()) {
         // The liveness of this humongous obj decided by either its allocation
         // time (allocated after conc-mark-start, i.e. live) or conc-marking.
-        const bool is_live = hr->top_at_mark_start() == hr->bottom()
+        const bool is_live = _cm->top_at_mark_start(hr) == hr->bottom()
                              || _cm->contains_live_object(hr->hrm_index());
         if (is_live) {
           const bool selected_for_rebuild = tracker->update_humongous_before_rebuild(hr);
@@ -1266,7 +1286,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
           reclaim_empty_humongous_region(hr);
         }
       } else if (hr->is_old()) {
-        hr->note_end_of_marking(_cm->live_bytes(hr->hrm_index()));
+        hr->note_end_of_marking(_cm->top_at_mark_start(hr), _cm->live_bytes(hr->hrm_index()));

         if (hr->live_bytes() != 0) {
           if (tracker->update_old_before_rebuild(hr)) {
@@ -1386,7 +1406,7 @@ void G1ConcurrentMark::remark() {

     // Unload Klasses, String, Code Cache, etc.
     if (ClassUnloadingWithConcurrentMark) {
-      G1CMIsAliveClosure is_alive(_g1h);
+      G1CMIsAliveClosure is_alive(this);
       _g1h->unload_classes_and_code("Class Unloading", &is_alive, _gc_timer_cm);
     }

@@ -1640,7 +1660,7 @@ public:

   void work(uint worker_id) override {
     assert(worker_id < _max_workers, "sanity");
-    G1CMIsAliveClosure is_alive(&_g1h);
+    G1CMIsAliveClosure is_alive(&_cm);
     uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
     G1CMKeepAliveAndDrainClosure keep_alive(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
     BarrierEnqueueDiscoveredFieldClosure enqueue;
@@ -1719,7 +1739,7 @@ void G1ConcurrentMark::weak_refs_work() {

   {
     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
-    G1CMIsAliveClosure is_alive(_g1h);
+    G1CMIsAliveClosure is_alive(this);
     WeakProcessor::weak_oops_do(_g1h->workers(), &is_alive, &do_nothing_cl, 1);
   }
 }
@@ -1898,9 +1918,9 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
     if (res == finger && curr_region != nullptr) {
       // we succeeded
       HeapWord* bottom = curr_region->bottom();
-      HeapWord* limit = curr_region->top_at_mark_start();
+      HeapWord* limit = top_at_mark_start(curr_region);

-      log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(curr_region->top_at_mark_start()));
+      log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(top_at_mark_start(curr_region)));
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
       assert(_finger >= end, "the finger should have moved forward");
@@ -2123,7 +2143,7 @@ void G1CMTask::setup_for_region(HeapRegion* hr) {
 void G1CMTask::update_region_limit() {
   HeapRegion* hr = _curr_region;
   HeapWord* bottom = hr->bottom();
-  HeapWord* limit = hr->top_at_mark_start();
+  HeapWord* limit = _cm->top_at_mark_start(hr);

   if (limit == bottom) {
     // The region was collected underneath our feet.
|
@ -41,11 +41,11 @@
|
||||
#include "utilities/numberSeq.hpp"
|
||||
|
||||
class ConcurrentGCTimer;
|
||||
class G1ConcurrentMarkThread;
|
||||
class G1CollectedHeap;
|
||||
class G1ConcurrentMark;
|
||||
class G1ConcurrentMarkThread;
|
||||
class G1CMOopClosure;
|
||||
class G1CMTask;
|
||||
class G1ConcurrentMark;
|
||||
class G1OldTracer;
|
||||
class G1RegionToSpaceMapper;
|
||||
class G1SurvivorRegions;
|
||||
@@ -96,9 +96,13 @@ typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
 // are alive. An instance is also embedded into the
 // reference processor as the _is_alive_non_header field
 class G1CMIsAliveClosure : public BoolObjectClosure {
-  G1CollectedHeap* _g1h;
+  G1ConcurrentMark* _cm;

 public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
+  G1CMIsAliveClosure();
+  G1CMIsAliveClosure(G1ConcurrentMark* cm);
+  void initialize(G1ConcurrentMark* cm);

   bool do_object_b(oop obj);
 };
@@ -537,6 +541,9 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {

   // Region statistics gathered during marking.
   G1RegionMarkStats* _region_mark_stats;
+  // Top pointer for each region at the start of marking. Must be valid for all committed
+  // regions.
+  HeapWord* volatile* _top_at_mark_starts;
   // Top pointer for each region at the start of the rebuild remembered set process
   // for regions which remembered sets need to be rebuilt. A null for a given region
   // means that this region does not be scanned during the rebuilding remembered
@@ -556,6 +563,16 @@ public:

   // Set live bytes for concurrent marking.
   void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }

+  // Update the TAMS for the given region to the current top.
+  inline void update_top_at_mark_start(HeapRegion* r);
+  // Reset the TAMS for the given region to bottom of that region.
+  inline void reset_top_at_mark_start(HeapRegion* r);
+
+  inline HeapWord* top_at_mark_start(const HeapRegion* r) const;
+  inline HeapWord* top_at_mark_start(uint region) const;
+  // Returns whether the given object been allocated since marking start (i.e. >= TAMS in that region).
+  inline bool obj_allocated_since_mark_start(oop obj) const;
+
   // Sets the internal top_at_region_start for the given region to current top of the region.
   inline void update_top_at_rebuild_start(HeapRegion* r);
   // TARS for the given region during remembered set rebuilding.
@@ -48,14 +48,13 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
     return true;
   }

-  HeapRegion* hr = _g1h->heap_region_containing(obj);
   // All objects allocated since the start of marking are considered live.
-  if (hr->obj_allocated_since_marking_start(obj)) {
+  if (_cm->obj_allocated_since_mark_start(obj)) {
     return true;
   }

   // All objects that are marked are live.
-  return _g1h->is_marked(obj);
+  return _cm->is_marked_in_bitmap(obj);
 }

 inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
@@ -66,15 +65,16 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
 }

 inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) {
-  HeapRegion* const hr = _g1h->heap_region_containing(obj);
-
-  if (hr->obj_allocated_since_marking_start(obj)) {
+  if (obj_allocated_since_mark_start(obj)) {
     return false;
   }

   // Some callers may have stale objects to mark above TAMS after humongous reclaim.
   // Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
-  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above TAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->top_at_mark_start()));
+  DEBUG_ONLY(HeapRegion* const hr = _g1h->heap_region_containing(obj);)
+  assert(!hr->is_continues_humongous(),
+         "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above TAMS " PTR_FORMAT,
+         p2i(obj), hr->hrm_index(), p2i(top_at_mark_start(hr)));

   bool success = _mark_bitmap.par_mark(obj);
   if (success) {
@@ -184,6 +184,31 @@ inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
   return mr.word_size();
 }

+inline void G1ConcurrentMark::update_top_at_mark_start(HeapRegion* r) {
+  uint const region = r->hrm_index();
+  assert(region < _g1h->max_reserved_regions(), "Tried to access TAMS for region %u out of bounds", region);
+  _top_at_mark_starts[region] = r->top();
+}
+
+inline void G1ConcurrentMark::reset_top_at_mark_start(HeapRegion* r) {
+  _top_at_mark_starts[r->hrm_index()] = r->bottom();
+}
+
+inline HeapWord* G1ConcurrentMark::top_at_mark_start(const HeapRegion* r) const {
+  return top_at_mark_start(r->hrm_index());
+}
+
+inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
+  assert(region < _g1h->max_reserved_regions(), "Tried to access TAMS for region %u out of bounds", region);
+  return _top_at_mark_starts[region];
+}
+
+inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
+  uint const region = _g1h->addr_to_region(obj);
+  assert(region < _g1h->max_reserved_regions(), "obj " PTR_FORMAT " outside heap %u", p2i(obj), region);
+  return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start(region);
+}
+
 inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(HeapRegion* r) const {
   return _top_at_rebuild_starts[r->hrm_index()];
 }
@@ -229,7 +229,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
     assert(should_rebuild_or_scrub(hr), "must be");

     log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT " TAMS: " PTR_FORMAT,
-                           HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr)), p2i(hr->top_at_mark_start()));
+                           HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr)), p2i(_cm->top_at_mark_start(hr)));

     if (scan_and_scrub_to_pb(hr, hr->bottom(), pb)) {
       log_trace(gc, marking)("Scan and scrub aborted for region: %u", hr->hrm_index());
@@ -124,7 +124,11 @@ void HeapRegion::hr_clear(bool clear_space) {

   rem_set()->clear();

-  init_top_at_mark_start();
+  G1CollectedHeap::heap()->concurrent_mark()->reset_top_at_mark_start(this);
+
+  _parsable_bottom = bottom();
+  _garbage_bytes = 0;

   if (clear_space) clear(SpaceDecorator::Mangle);
 }
@@ -226,7 +230,6 @@ HeapRegion::HeapRegion(uint hrm_index,
 #ifdef ASSERT
   _containing_set(nullptr),
 #endif
-  _top_at_mark_start(nullptr),
   _parsable_bottom(nullptr),
   _garbage_bytes(0),
   _young_index_in_cset(-1),
@@ -414,8 +417,9 @@ void HeapRegion::print_on(outputStream* st) const {
   } else {
     st->print("| ");
   }
+  G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
   st->print("|TAMS " PTR_FORMAT "| PB " PTR_FORMAT "| %s ",
-            p2i(top_at_mark_start()), p2i(parsable_bottom_acquire()), rem_set()->get_state_str());
+            p2i(cm->top_at_mark_start(this)), p2i(parsable_bottom_acquire()), rem_set()->get_state_str());
   if (UseNUMA) {
     G1NUMA* numa = G1NUMA::numa();
     if (node_index() < numa->num_active_nodes()) {
@@ -223,12 +223,6 @@ private:
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT

-  // The start of the unmarked area. The unmarked area extends from this
-  // word until the top and/or end of the region, and is the part
-  // of the region for which no marking was done, i.e. objects may
-  // have been allocated in this part since the last mark phase.
-  HeapWord* volatile _top_at_mark_start;
-
   // The area above this limit is fully parsable. This limit
   // is equal to bottom except
   //
@@ -246,8 +240,6 @@ private:
   // Amount of dead data in the region.
   size_t _garbage_bytes;

-  inline void init_top_at_mark_start();
-
   // Data for young region survivor prediction.
   uint _young_index_in_cset;
   G1SurvRateGroup* _surv_rate_group;
@@ -352,10 +344,6 @@ public:

   inline bool is_collection_set_candidate() const;

-  // Get the start of the unmarked area in this region.
-  HeapWord* top_at_mark_start() const;
-  void set_top_at_mark_start(HeapWord* value);
-
   // Retrieve parsable bottom; since it may be modified concurrently, outside a
   // safepoint the _acquire method must be used.
   HeapWord* parsable_bottom() const;
@@ -366,20 +354,13 @@ public:
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.

   // Notify the region that concurrent marking is starting. Initialize
   // all fields related to the next marking info.
   inline void note_start_of_marking();

-  // Notify the region that concurrent marking has finished. Passes the number of
-  // bytes between bottom and TAMS.
-  inline void note_end_of_marking(size_t marked_bytes);
+  // Notify the region that concurrent marking has finished. Passes TAMS and the number of
+  // bytes marked between bottom and TAMS.
+  inline void note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes);

   // Notify the region that scrubbing has completed.
   inline void note_end_of_scrubbing();

-  // Notify the region that the (corresponding) bitmap has been cleared.
-  inline void reset_top_at_mark_start();
-
   // During the concurrent scrubbing phase, can there be any areas with unloaded
   // classes or dead objects in this region?
   // This set only includes old regions - humongous regions only
@@ -540,10 +521,6 @@ public:
   inline bool is_in_parsable_area(const void* const addr) const;
   inline static bool is_in_parsable_area(const void* const addr, const void* const pb);

-  bool obj_allocated_since_marking_start(oop obj) const {
-    return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start();
-  }
-
   // Update the region state after a failed evacuation.
   void handle_evacuation_failure(bool retain);
@@ -176,9 +176,6 @@ inline void HeapRegion::prepare_for_full_gc() {

 inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
   set_top(new_top);
-  // After a compaction the mark bitmap in a movable region is invalid.
-  // But all objects are live, we get this by setting TAMS to bottom.
-  init_top_at_mark_start();

   reset_after_full_gc_common();
 }
@@ -186,17 +183,19 @@ inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
 inline void HeapRegion::reset_skip_compacting_after_full_gc() {
   assert(!is_free(), "must be");

-  _garbage_bytes = 0;
-
-  reset_top_at_mark_start();
-
   reset_after_full_gc_common();
 }

 inline void HeapRegion::reset_after_full_gc_common() {
+  // After a full gc the mark information in a movable region is invalid. Reset marking
+  // information.
+  G1CollectedHeap::heap()->concurrent_mark()->reset_top_at_mark_start(this);
+
   // Everything above bottom() is parsable and live.
   reset_parsable_bottom();

+  _garbage_bytes = 0;
+
   // Clear unused heap memory in debug builds.
   if (ZapUnusedHeapArea) {
     mangle_unused_area();
@@ -266,14 +265,6 @@ inline void HeapRegion::update_bot_for_obj(HeapWord* obj_start, size_t obj_size)
   _bot_part.update_for_block(obj_start, obj_end);
 }

-inline HeapWord* HeapRegion::top_at_mark_start() const {
-  return Atomic::load(&_top_at_mark_start);
-}
-
-inline void HeapRegion::set_top_at_mark_start(HeapWord* value) {
-  Atomic::store(&_top_at_mark_start, value);
-}
-
 inline HeapWord* HeapRegion::parsable_bottom() const {
   assert(!is_init_completed() || SafepointSynchronize::is_at_safepoint(), "only during initialization or safepoint");
   return _parsable_bottom;
@@ -287,22 +278,15 @@ inline void HeapRegion::reset_parsable_bottom() {
   Atomic::release_store(&_parsable_bottom, bottom());
 }

 inline void HeapRegion::note_start_of_marking() {
-  assert(top_at_mark_start() == bottom(), "Region's TAMS must always be at bottom");
-  if (is_old_or_humongous() && !is_collection_set_candidate()) {
-    set_top_at_mark_start(top());
-  }
 }

-inline void HeapRegion::note_end_of_marking(size_t marked_bytes) {
+inline void HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes) {
   assert_at_safepoint();

-  if (top_at_mark_start() != bottom()) {
-    _garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes;
+  if (top_at_mark_start != bottom()) {
+    _garbage_bytes = byte_size(bottom(), top_at_mark_start) - marked_bytes;
   }

   if (needs_scrubbing()) {
-    _parsable_bottom = top_at_mark_start();
+    _parsable_bottom = top_at_mark_start;
   }
 }
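Since HeapRegion no longer stores TAMS, note_end_of_marking() above now receives it from the caller (the G1UpdateRegionLivenessAndSelectForRebuildTask hunk earlier passes _cm->top_at_mark_start(hr)). As a worked illustration with made-up numbers: if the bottom-to-TAMS span of a region covers 4 MB and marking recorded 1 MB of live data, _garbage_bytes becomes 4 MB - 1 MB = 3 MB; and if the region needs scrubbing, _parsable_bottom moves to the passed-in TAMS, below which the region is not yet guaranteed parsable.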
@@ -310,23 +294,6 @@ inline void HeapRegion::note_end_of_scrubbing() {
   reset_parsable_bottom();
 }

-inline void HeapRegion::init_top_at_mark_start() {
-  reset_top_at_mark_start();
-  _parsable_bottom = bottom();
-  _garbage_bytes = 0;
-}
-
-inline void HeapRegion::reset_top_at_mark_start() {
-  // We do not need a release store here because
-  //
-  // - if this method is called during concurrent bitmap clearing, we do not read
-  //   the bitmap any more for live/dead information (we do not read the bitmap at
-  //   all at that point).
-  // - otherwise we reclaim regions only during GC and we do not read tams and the
-  //   bitmap concurrently.
-  set_top_at_mark_start(bottom());
-}
-
 inline bool HeapRegion::needs_scrubbing() const {
   return is_old();
 }
@@ -462,10 +462,12 @@ public:
     G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();

     bool part_of_marking = r->is_old_or_humongous() && !r->is_collection_set_candidate();
+    HeapWord* top_at_mark_start = cm->top_at_mark_start(r);

     if (part_of_marking) {
-      guarantee(r->bottom() != r->top_at_mark_start(), "region %u (%s) does not have TAMS set",
-                r->hrm_index(), r->get_short_type_str());
+      guarantee(r->bottom() != top_at_mark_start,
+                "region %u (%s) does not have TAMS set",
+                r->hrm_index(), r->get_short_type_str());
       size_t marked_bytes = cm->live_bytes(r->hrm_index());

       MarkedBytesClosure cl;
@@ -475,9 +477,9 @@ public:
                 "region %u (%s) live bytes actual %zu and cache %zu differ",
                 r->hrm_index(), r->get_short_type_str(), cl.marked_bytes(), marked_bytes);
     } else {
-      guarantee(r->bottom() == r->top_at_mark_start(),
+      guarantee(r->bottom() == top_at_mark_start,
                 "region %u (%s) has TAMS set " PTR_FORMAT " " PTR_FORMAT,
-                r->hrm_index(), r->get_short_type_str(), p2i(r->bottom()), p2i(r->top_at_mark_start()));
+                r->hrm_index(), r->get_short_type_str(), p2i(r->bottom()), p2i(top_at_mark_start));
       guarantee(cm->live_bytes(r->hrm_index()) == 0,
                 "region %u (%s) has %zu live bytes recorded",
                 r->hrm_index(), r->get_short_type_str(), cm->live_bytes(r->hrm_index()));
@@ -542,9 +544,10 @@ void G1HeapVerifier::verify_bitmap_clear(bool from_tams) {
     G1VerifyBitmapClear(bool from_tams) : _from_tams(from_tams) { }

     virtual bool do_heap_region(HeapRegion* r) {
-      G1CMBitMap* bitmap = G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap();
+      G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
+      G1CMBitMap* bitmap = cm->mark_bitmap();

-      HeapWord* start = _from_tams ? r->top_at_mark_start() : r->bottom();
+      HeapWord* start = _from_tams ? cm->top_at_mark_start(r) : r->bottom();

       HeapWord* mark = bitmap->get_next_marked_addr(start, r->end());
       guarantee(mark == r->end(), "Found mark at " PTR_FORMAT " in region %u from start " PTR_FORMAT, p2i(mark), r->hrm_index(), p2i(start));
@@ -1133,7 +1133,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
       // so the bitmap for the regions in the collection set must be cleared if not already.
       if (should_clear_region(hr)) {
         _g1h->clear_bitmap_for_region(hr);
-        hr->reset_top_at_mark_start();
+        _g1h->concurrent_mark()->reset_top_at_mark_start(hr);
       } else {
         assert_bitmap_clear(hr, _g1h->concurrent_mark()->mark_bitmap());
       }
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSetChooser.hpp"
 #include "gc/g1/g1HeapRegion.inline.hpp"
 #include "gc/g1/g1HeapRegionRemSet.inline.hpp"
@@ -119,7 +120,7 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
                            "remset occ %zu "
                            "size %zu)",
                            r->hrm_index(),
-                           p2i(r->top_at_mark_start()),
+                           p2i(cm->top_at_mark_start(r)),
                            cm->live_bytes(r->hrm_index()),
                            r->rem_set()->occupied(),
                            r->rem_set()->mem_size());
@@ -85,8 +85,8 @@ static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) {
   assert(g1h->is_in_reserved(entry),
          "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));

-  HeapRegion* region = g1h->heap_region_containing(entry);
-  if (entry >= region->top_at_mark_start()) {
+  G1ConcurrentMark* cm = g1h->concurrent_mark();
+  if (cm->obj_allocated_since_mark_start(cast_to_oop(entry))) {
     return false;
   }

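The SATB filter rewrite above is behavior-preserving: the old entry >= region->top_at_mark_start() and the new cm->obj_allocated_since_mark_start(cast_to_oop(entry)) both test whether the address lies at or above TAMS, the latter via the region-indexed lookup added in the inline accessor hunk earlier. Entries allocated since marking started are implicitly live, so the SATB queue never needs to mark them.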
@@ -545,12 +545,12 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     G1ConcurrentMark* cm = g1h->concurrent_mark();

-    uint region = r->hrm_index();
-    assert(r->top_at_mark_start() == r->bottom(), "TAMS must not have been set for region %u", region);
-    assert(cm->live_bytes(region) == 0, "Marking live bytes must not be set for region %u", region);
+    HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
+    assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
+    assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());

     // Concurrent mark does not mark through regions that we retain (they are root
-    // regions wrt to marking), so we must clear their mark data (tams, bitmap)
+    // regions wrt to marking), so we must clear their mark data (tams, bitmap, ...)
     // set eagerly or during evacuation failure.
     bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() ||
                            g1h->policy()->should_retain_evac_failed_region(r);
@@ -559,9 +559,9 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
       g1h->clear_bitmap_for_region(r);
     } else {
       // This evacuation failed region is going to be marked through. Update mark data.
-      r->set_top_at_mark_start(r->top());
+      cm->update_top_at_mark_start(r);
       cm->set_live_bytes(r->hrm_index(), r->live_bytes());
-      assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), r->top_at_mark_start()) != r->top_at_mark_start(),
+      assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), cm->top_at_mark_start(r)) != cm->top_at_mark_start(r),
              "Marks must be on bitmap for region %u", r->hrm_index());
     }
     return false;