8274516: [REDO] JDK-8271880: Tighten condition for excluding regions from collecting cards with cross-references
Reviewed-by: sjohanss, ayang
Parent: 337b73a459
Commit: c3b75c6cdf
Changed files:
  src/hotspot/share/gc/g1/g1CollectedHeap.cpp
  src/hotspot/share/gc/g1/g1CollectedHeap.hpp
  src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
  src/hotspot/share/gc/g1/g1EvacFailure.cpp
  src/hotspot/share/gc/g1/g1EvacFailure.hpp
  src/hotspot/share/gc/g1/g1HeapRegionAttr.hpp
  src/hotspot/share/gc/g1/g1OopClosures.hpp
  src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
  src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
  src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
  src/hotspot/share/gc/g1/g1YoungCollector.cpp
  src/hotspot/share/gc/g1/g1YoungCollector.hpp
  src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
  src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp
  src/hotspot/share/gc/shared/referenceProcessor.cpp
  src/hotspot/share/gc/shared/referenceProcessor.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -3272,6 +3272,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
     new_alloc_region->set_survivor();
     _survivor.add(new_alloc_region);
     _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
+    register_new_survivor_region_with_region_attr(new_alloc_region);
   } else {
     new_alloc_region->set_old();
     _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -605,6 +605,7 @@ public:
   void register_young_region_with_region_attr(HeapRegion* r) {
     _region_attr.set_in_young(r->hrm_index());
   }
+  inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
   inline void register_region_with_region_attr(HeapRegion* r);
   inline void register_old_region_with_region_attr(HeapRegion* r);
   inline void register_optional_region_with_region_attr(HeapRegion* r);
src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

@@ -184,6 +184,10 @@ void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
   _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
 }

+void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion* r) {
+  _region_attr.set_new_survivor_region(r->hrm_index());
+}
+
 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
   _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
 }
src/hotspot/share/gc/g1/g1EvacFailure.cpp

@@ -30,7 +30,6 @@
 #include "gc/g1/g1EvacFailureRegions.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.inline.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
@@ -38,65 +37,23 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"

-class UpdateLogBuffersDeferred : public BasicOopIterateClosure {
-private:
-  G1CollectedHeap* _g1h;
-  G1RedirtyCardsLocalQueueSet* _rdc_local_qset;
-  G1CardTable* _ct;
-
-  // Remember the last enqueued card to avoid enqueuing the same card over and over;
-  // since we only ever handle a card once, this is sufficient.
-  size_t _last_enqueued_card;
-
-public:
-  UpdateLogBuffersDeferred(G1RedirtyCardsLocalQueueSet* rdc_local_qset) :
-    _g1h(G1CollectedHeap::heap()),
-    _rdc_local_qset(rdc_local_qset),
-    _ct(_g1h->card_table()),
-    _last_enqueued_card(SIZE_MAX) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    assert(_g1h->heap_region_containing(p)->is_in_reserved(p), "paranoia");
-    assert(!_g1h->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
-
-    T const o = RawAccess<>::oop_load(p);
-    if (CompressedOops::is_null(o)) {
-      return;
-    }
-
-    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
-      return;
-    }
-    size_t card_index = _ct->index_for(p);
-    if (card_index != _last_enqueued_card) {
-      _rdc_local_qset->enqueue(_ct->byte_for_index(card_index));
-      _last_enqueued_card = card_index;
-    }
-  }
-};
-
 class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
   G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
   HeapRegion* _hr;
   size_t _marked_bytes;
-  UpdateLogBuffersDeferred* _log_buffer_cl;
   bool _during_concurrent_start;
   uint _worker_id;
   HeapWord* _last_forwarded_object_end;

 public:
   RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
-                                 UpdateLogBuffersDeferred* log_buffer_cl,
                                  bool during_concurrent_start,
                                  uint worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _cm(_g1h->concurrent_mark()),
     _hr(hr),
     _marked_bytes(0),
-    _log_buffer_cl(log_buffer_cl),
     _during_concurrent_start(during_concurrent_start),
     _worker_id(worker_id),
     _last_forwarded_object_end(hr->bottom()) { }
@@ -141,20 +98,6 @@ public:
     _marked_bytes += (obj_size * HeapWordSize);
     PreservedMarks::init_forwarded_mark(obj);
-
-    // While we were processing RSet buffers during the collection,
-    // we actually didn't scan any cards on the collection set,
-    // since we didn't want to update remembered sets with entries
-    // that point into the collection set, given that live objects
-    // from the collection set are about to move and such entries
-    // will be stale very soon.
-    // This change also dealt with a reliability issue which
-    // involved scanning a card in the collection set and coming
-    // across an array that was being chunked and looking malformed.
-    // The problem is that, if evacuation fails, we might have
-    // remembered set entries missing given that we skipped cards on
-    // the collection set. So, we'll recreate such entries now.
-    obj->oop_iterate(_log_buffer_cl);

     HeapWord* obj_end = obj_addr + obj_size;
     _last_forwarded_object_end = obj_end;
     _hr->alloc_block_in_bot(obj_addr, obj_end);
@@ -203,33 +146,22 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   uint _worker_id;

-  G1RedirtyCardsLocalQueueSet _rdc_local_qset;
-  UpdateLogBuffersDeferred _log_buffer_cl;
-
   uint volatile* _num_failed_regions;
   G1EvacFailureRegions* _evac_failure_regions;

 public:
-  RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs,
-                                uint worker_id,
+  RemoveSelfForwardPtrHRClosure(uint worker_id,
                                 uint volatile* num_failed_regions,
                                 G1EvacFailureRegions* evac_failure_regions) :
     _g1h(G1CollectedHeap::heap()),
     _worker_id(worker_id),
-    _rdc_local_qset(rdcqs),
-    _log_buffer_cl(&_rdc_local_qset),
     _num_failed_regions(num_failed_regions),
     _evac_failure_regions(evac_failure_regions) {
   }

-  ~RemoveSelfForwardPtrHRClosure() {
-    _rdc_local_qset.flush();
-  }
-
   size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
                                                bool during_concurrent_start) {
     RemoveSelfForwardPtrObjClosure rspc(hr,
-                                        &_log_buffer_cl,
                                         during_concurrent_start,
                                         _worker_id);
     hr->object_iterate(&rspc);
@@ -268,17 +200,15 @@ public:
   }
 };

-G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs,
-                                                               G1EvacFailureRegions* evac_failure_regions) :
+G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions) :
   AbstractGangTask("G1 Remove Self-forwarding Pointers"),
   _g1h(G1CollectedHeap::heap()),
-  _rdcqs(rdcqs),
   _hrclaimer(_g1h->workers()->active_workers()),
   _evac_failure_regions(evac_failure_regions),
   _num_failed_regions(0) { }

 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
-  RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id, &_num_failed_regions, _evac_failure_regions);
+  RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_num_failed_regions, _evac_failure_regions);

   // Iterate through all regions that failed evacuation during the entire collection.
   _evac_failure_regions->par_iterate(&rsfp_cl, &_hrclaimer, worker_id);
src/hotspot/share/gc/g1/g1EvacFailure.hpp

@@ -32,21 +32,19 @@

 class G1CollectedHeap;
 class G1EvacFailureRegions;
-class G1RedirtyCardsQueueSet;

 // Task to fixup self-forwarding pointers
 // installed as a result of an evacuation failure.
 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
-  G1RedirtyCardsQueueSet* _rdcqs;
   HeapRegionClaimer _hrclaimer;

   G1EvacFailureRegions* _evac_failure_regions;
   uint volatile _num_failed_regions;

 public:
-  G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs, G1EvacFailureRegions* evac_failure_regions);
+  G1ParRemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions);

   void work(uint worker_id);

src/hotspot/share/gc/g1/g1HeapRegionAttr.hpp

@@ -57,8 +57,9 @@ public:
   //
   // The other values are used for objects in regions requiring various special handling,
   // eager reclamation of humongous objects or optional regions.
-  static const region_type_t Optional    = -3; // The region is optional not in the current collection set.
-  static const region_type_t Humongous   = -2; // The region is a humongous candidate not in the current collection set.
+  static const region_type_t Optional    = -4; // The region is optional not in the current collection set.
+  static const region_type_t Humongous   = -3; // The region is a humongous candidate not in the current collection set.
+  static const region_type_t NewSurvivor = -2; // The region is a new (ly allocated) survivor region.
   static const region_type_t NotInCSet   = -1; // The region is not in the collection set.
   static const region_type_t Young       =  0; // The region is in the collection set and a young region.
   static const region_type_t Old         =  1; // The region is in the collection set and an old region.
@@ -76,6 +77,7 @@ public:
     switch (type()) {
       case Optional: return "Optional";
       case Humongous: return "Humongous";
+      case NewSurvivor: return "NewSurvivor";
       case NotInCSet: return "NotInCSet";
       case Young: return "Young";
       case Old: return "Old";
@@ -85,6 +87,7 @@ public:

   bool needs_remset_update() const { return _needs_remset_update != 0; }

+  void set_new_survivor() { _type = NewSurvivor; }
   void set_old() { _type = Old; }
   void clear_humongous() {
     assert(is_humongous() || !is_in_cset(), "must be");
@@ -96,6 +99,7 @@ public:
   bool is_in_cset() const { return type() >= Young; }

   bool is_humongous() const { return type() == Humongous; }
+  bool is_new_survivor() const { return type() == NewSurvivor; }
   bool is_young() const { return type() == Young; }
   bool is_old() const { return type() == Old; }
   bool is_optional() const { return type() == Optional; }
@@ -128,6 +132,12 @@ class G1HeapRegionAttrBiasedMappedArray : public G1BiasedMappedArray<G1HeapRegio
     set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Optional, needs_remset_update));
   }

+  void set_new_survivor_region(uintptr_t index) {
+    assert(get_by_index(index).is_default(),
+           "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
+    get_ref_by_index(index)->set_new_survivor();
+  }
+
   void set_humongous(uintptr_t index, bool needs_remset_update) {
     assert(get_by_index(index).is_default(),
            "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
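The renumbering above is the heart of the attribute change: every state outside the current collection set stays negative, so is_in_cset() remains a single type() >= Young comparison even after NewSurvivor is wedged in at -2. A standalone sketch of that encoding (plain C++, not HotSpot code; RegionAttr is a stand-in for G1HeapRegionAttr):

    #include <cassert>
    #include <cstdint>

    // Stand-in for G1HeapRegionAttr's type encoding after this change: all
    // not-in-collection-set states are negative, in-cset states are >= Young.
    struct RegionAttr {
      using region_type_t = int8_t;
      static const region_type_t Optional    = -4;
      static const region_type_t Humongous   = -3;
      static const region_type_t NewSurvivor = -2; // newly allocated survivor region
      static const region_type_t NotInCSet   = -1;
      static const region_type_t Young       =  0;
      static const region_type_t Old         =  1;

      region_type_t _type;

      bool is_in_cset() const      { return _type >= Young; } // still one compare
      bool is_new_survivor() const { return _type == NewSurvivor; }
    };

    int main() {
      RegionAttr a{RegionAttr::NewSurvivor};
      // A new survivor region is distinguishable from plain NotInCSet without
      // touching the HeapRegion, and without widening the in-cset check.
      assert(!a.is_in_cset());
      assert(a.is_new_survivor());
      return 0;
    }

This is why set_new_survivor_region() can be a plain type write in the biased array: the predicates stay branch-free lookups on a single byte.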
src/hotspot/share/gc/g1/g1OopClosures.hpp

@@ -116,9 +116,9 @@ class G1SkipCardEnqueueSetter : public StackObj {
   G1ScanEvacuatedObjClosure* _closure;

 public:
-  G1SkipCardEnqueueSetter(G1ScanEvacuatedObjClosure* closure, bool new_value) : _closure(closure) {
+  G1SkipCardEnqueueSetter(G1ScanEvacuatedObjClosure* closure, bool skip_card_enqueue) : _closure(closure) {
     assert(_closure->_skip_card_enqueue == G1ScanEvacuatedObjClosure::Uninitialized, "Must not be set");
-    _closure->_skip_card_enqueue = new_value ? G1ScanEvacuatedObjClosure::True : G1ScanEvacuatedObjClosure::False;
+    _closure->_skip_card_enqueue = skip_card_enqueue ? G1ScanEvacuatedObjClosure::True : G1ScanEvacuatedObjClosure::False;
   }

   ~G1SkipCardEnqueueSetter() {
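The rename matters because every scan path below now has to spell out whether card enqueueing is skipped, and the guard enforces exactly one explicit decision per scope. A generic sketch of the RAII pattern (standalone; Scanner stands in for G1ScanEvacuatedObjClosure, and whether the real destructor resets unconditionally or only in debug builds is a detail this sketch glosses over):

    #include <cassert>

    enum class TriState { Uninitialized, True, False };

    struct Scanner {
      TriState _skip_card_enqueue = TriState::Uninitialized;
    };

    // Stand-in for G1SkipCardEnqueueSetter: callers must pick a policy for the
    // scope of one scan; the flag is reset afterwards so the next scan must
    // decide again rather than inherit a stale value.
    class SkipCardEnqueueSetter {
      Scanner* _scanner;
    public:
      SkipCardEnqueueSetter(Scanner* scanner, bool skip_card_enqueue) : _scanner(scanner) {
        assert(_scanner->_skip_card_enqueue == TriState::Uninitialized && "Must not be set");
        _scanner->_skip_card_enqueue = skip_card_enqueue ? TriState::True : TriState::False;
      }
      ~SkipCardEnqueueSetter() {
        _scanner->_skip_card_enqueue = TriState::Uninitialized;
      }
    };

    int main() {
      Scanner scanner;
      {
        // Reads like the new call sites: the argument names the policy, not "new_value".
        SkipCardEnqueueSetter x(&scanner, false /* skip_card_enqueue */);
        assert(scanner._skip_card_enqueue == TriState::False);
      }
      assert(scanner._skip_card_enqueue == TriState::Uninitialized);
      return 0;
    }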
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

@@ -235,8 +235,8 @@ void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
     push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
   }

-  HeapRegion* hr = _g1h->heap_region_containing(to_array);
-  G1SkipCardEnqueueSetter x(&_scanner, hr->is_young());
+  G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
+  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
   // Process claimed task. The length of to_array is not correct, but
   // fortunately the iteration ignores the length field and just relies
   // on start/end.
@@ -268,6 +268,11 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
     push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
   }

+  // Skip the card enqueue iff the object (to_array) is in survivor region.
+  // However, HeapRegion::is_survivor() is too expensive here.
+  // Instead, we use dest_attr.is_young() because the two values are always
+  // equal: successfully allocated young regions must be survivor regions.
+  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
   G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
   // Process the initial chunk. No need to process the type in the
   // klass, as it will already be handled by processing the built-in
@@ -519,6 +524,11 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
       _string_dedup_requests.add(old);
     }

+    // Skip the card enqueue iff the object (obj) is in survivor region.
+    // However, HeapRegion::is_survivor() is too expensive here.
+    // Instead, we use dest_attr.is_young() because the two values are always
+    // equal: successfully allocated young regions must be survivor regions.
+    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
     G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
     obj->oop_iterate_backwards(&_scanner, klass);
     return obj;
@@ -605,7 +615,14 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
     _preserved_marks->push_if_necessary(old, m);
     _evacuation_failed_info.register_copy_failure(word_sz);

-    G1SkipCardEnqueueSetter x(&_scanner, r->is_young());
+    // For iterating objects that failed evacuation currently we can reuse the
+    // existing closure to scan evacuated objects because:
+    // - for objects referring into the collection set we do not need to gather
+    // cards at this time. The regions they are in will be unconditionally turned
+    // to old regions without remembered sets.
+    // - since we are iterating from a collection set region (i.e. never a Survivor
+    // region), we always need to gather cards for this case.
+    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
     old->oop_iterate_backwards(&_scanner);

     return old;
src/hotspot/share/gc/g1/g1ParScanThreadState.hpp

@@ -134,7 +134,7 @@ public:

   // Apply the post barrier to the given reference field. Enqueues the card of p
   // if the barrier does not filter out the reference for some reason (e.g.
-  // p and q are in the same region, p is in survivor)
+  // p and q are in the same region, p is in survivor, p is in collection set)
   // To be called during GC if nothing particular about p and obj are known.
   template <class T> void write_ref_field_post(T* p, oop obj);

src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp

@@ -97,19 +97,34 @@ G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const Heap
 }

 template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj) {
-  assert(obj != NULL, "Must be");
+  assert(obj != nullptr, "Must be");
   if (HeapRegion::is_in_same_region(p, obj)) {
     return;
   }
-  HeapRegion* from = _g1h->heap_region_containing(p);
-  if (!from->is_young()) {
-    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
-  }
+  G1HeapRegionAttr from_attr = _g1h->region_attr(p);
+  // If this is a reference from (current) survivor regions, we do not need
+  // to track references from it.
+  if (from_attr.is_new_survivor()) {
+    return;
+  }
+  G1HeapRegionAttr dest_attr = _g1h->region_attr(obj);
+  // References to the current collection set are references to objects that failed
+  // evacuation. Currently these regions are always relabelled as old without
+  // remembered sets, so skip them.
+  assert(dest_attr.is_in_cset() == (obj->forwardee() == obj),
+         "Only evac-failed objects must be in the collection set here but " PTR_FORMAT " is not", p2i(obj));
+  if (dest_attr.is_in_cset()) {
+    return;
+  }
+  enqueue_card_if_tracked(dest_attr, p, obj);
 }

 template <class T> void G1ParScanThreadState::enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
   assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
-  assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
+  assert(!_g1h->heap_region_containing(p)->is_survivor(), "Should have filtered out from-newly allocated survivor references already.");
+  // We relabel all regions that failed evacuation as old gen without remembered,
+  // and so pre-filter them out in the caller.
+  assert(!_g1h->heap_region_containing(o)->in_collection_set(), "Should not try to enqueue reference into collection set region");

 #ifdef ASSERT
   HeapRegion* const hr_obj = _g1h->heap_region_containing(o);
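Read as pseudocode, the rewritten barrier above is a chain of cheap attribute-table filters; a card is collected only when every filter passes. A self-contained model of the decision (plain C++; the attribute values mirror the sketch after the g1HeapRegionAttr.hpp hunks, and must_enqueue_card plus the dest_tracked flag, which stands in for the remembered-set tracking check, are invented names):

    #include <cassert>

    enum class Attr { Optional, Humongous, NewSurvivor, NotInCSet, Young, Old };

    static bool in_cset(Attr a) { return a == Attr::Young || a == Attr::Old; }

    // Model of the tightened write_ref_field_post: should a card be collected
    // for a store at a field in a region with attribute 'from', pointing at an
    // object in a region with attribute 'dest'?
    static bool must_enqueue_card(bool same_region, Attr from, Attr dest, bool dest_tracked) {
      if (same_region) {
        return false;             // intra-region references never need cards
      }
      if (from == Attr::NewSurvivor) {
        return false;             // new survivor regions are scanned wholesale in
                                  // the next GC; their outgoing refs need no cards
      }
      if (in_cset(dest)) {
        return false;             // target failed evacuation; its region will be
                                  // relabelled old without a remembered set
      }
      return dest_tracked;        // otherwise defer to the target's remset tracking
    }

    int main() {
      // The filter this patch tightens: references *from* just-allocated
      // survivor regions are dropped before any remembered-set lookup.
      assert(!must_enqueue_card(false, Attr::NewSurvivor, Attr::Old, true));
      // References into still-in-cset (evac-failed) regions are dropped too.
      assert(!must_enqueue_card(false, Attr::NotInCSet, Attr::Young, true));
      // A cross-region reference into a tracked old region still yields a card.
      assert(must_enqueue_card(false, Attr::NotInCSet, Attr::Old, true));
      return 0;
    }

Replacing the old !from->is_young() region lookup with from_attr.is_new_survivor() is what the title means by tightening the exclusion: only regions allocated as survivors during this GC are exempt, not every young region.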
src/hotspot/share/gc/g1/g1YoungCollector.cpp

@@ -25,6 +25,7 @@
 #include "precompiled.hpp"

+#include "classfile/classLoaderDataGraph.inline.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "compiler/oopMap.hpp"
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1CardSetMemory.hpp"
@@ -911,6 +912,31 @@ class G1STWRefProcProxyTask : public RefProcProxyTask {
   TaskTerminator _terminator;
   G1ScannerTasksQueueSet& _task_queues;

+  // Special closure for enqueuing discovered fields: during enqueue the card table
+  // may not be in shape to properly handle normal barrier calls (e.g. card marks
+  // in regions that failed evacuation, scribbling of various values by card table
+  // scan code). Additionally the regular barrier enqueues into the "global"
+  // DCQS, but during GC we need these to-be-refined entries in the GC local queue
+  // so that after clearing the card table, the redirty cards phase will properly
+  // mark all dirty cards to be picked up by refinement.
+  class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
+    G1CollectedHeap* _g1h;
+    G1ParScanThreadState* _pss;
+
+  public:
+    G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }
+
+    void enqueue(HeapWord* discovered_field_addr, oop value) override {
+      assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
+      // Store the value first, whatever it is.
+      RawAccess<>::oop_store(discovered_field_addr, value);
+      if (value == nullptr) {
+        return;
+      }
+      _pss->write_ref_field_post(discovered_field_addr, value);
+    }
+  };
+
 public:
   G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
     : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
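The new closure's contract is subtle: the store must always happen, because the discovered field is being rewritten while the lists are cut up, but only a non-null store can create a cross-region reference worth a card, and that card has to go through the per-thread state rather than the mutator barrier. A reduced, compilable model of that ordering (every name except write_ref_field_post is a stand-in):

    struct Obj;                     // opaque heap object

    struct PerThreadState {
      // Stand-in for G1ParScanThreadState::write_ref_field_post: would filter
      // the reference and enqueue a card into the GC-local redirty queue set.
      void write_ref_field_post(Obj** field, Obj* value) { (void)field; (void)value; }
    };

    // Mirror of G1EnqueueDiscoveredFieldClosure::enqueue's shape.
    void enqueue_discovered_field(PerThreadState* pss, Obj** discovered_field_addr, Obj* value) {
      *discovered_field_addr = value;          // store first, whatever the value is
      if (value == nullptr) {
        return;                                // a null store creates no reference
      }
      pss->write_ref_field_post(discovered_field_addr, value);  // GC-local barrier
    }

    int main() {
      PerThreadState pss;
      Obj* field = nullptr;
      enqueue_discovered_field(&pss, &field, nullptr);  // null: store only, no card
      return 0;
    }

Routing the card through the per-thread state is what keeps the entry in the GC-local queue so the redirty phase sees it, as the comment in the hunk explains.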
@@ -928,7 +954,7 @@ public:

     G1STWIsAliveClosure is_alive(&_g1h);
     G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
-    BarrierEnqueueDiscoveredFieldClosure enqueue;
+    G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
     G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
     _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);

src/hotspot/share/gc/g1/g1YoungCollector.hpp

@@ -142,7 +142,6 @@ class G1YoungCollector {
 #endif // TASKQUEUE_STATS

 public:
-
   G1YoungCollector(GCCause::Cause gc_cause,
                    double target_pause_time_ms);
   void collect();
src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp

@@ -102,9 +102,9 @@ class G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask : publi
   G1EvacFailureRegions* _evac_failure_regions;

 public:
-  RemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs, G1EvacFailureRegions* evac_failure_regions) :
+  RemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions) :
     G1AbstractSubTask(G1GCPhaseTimes::RemoveSelfForwardingPtr),
-    _task(rdcqs, evac_failure_regions),
+    _task(evac_failure_regions),
     _evac_failure_regions(evac_failure_regions) { }

   ~RemoveSelfForwardPtrsTask() {
@@ -135,7 +135,7 @@ G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1
     add_serial_task(new SampleCollectionSetCandidatesTask());
   }
   if (evacuation_failed) {
-    add_parallel_task(new RemoveSelfForwardPtrsTask(per_thread_states->rdcqs(), evac_failure_regions));
+    add_parallel_task(new RemoveSelfForwardPtrsTask(evac_failure_regions));
   }
   add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
 }
src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp

@@ -34,7 +34,6 @@ class G1CollectedHeap;
 class G1EvacFailureRegions;
 class G1EvacInfo;
 class G1ParScanThreadStateSet;
-class G1RedirtyCardsQueueSet;

 // First set of post evacuate collection set tasks containing ("s" means serial):
 // - Merge PSS (s)
src/hotspot/share/gc/shared/referenceProcessor.cpp

@@ -40,6 +40,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/nonJavaThread.hpp"
+#include "utilities/globalDefinitions.hpp"

 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
@@ -218,10 +219,10 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(RefPro
   return stats;
 }

-void BarrierEnqueueDiscoveredFieldClosure::enqueue(oop reference, oop value) {
-  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(reference,
-                                            java_lang_ref_Reference::discovered_offset(),
-                                            value);
+void BarrierEnqueueDiscoveredFieldClosure::enqueue(HeapWord* discovered_field_addr, oop value) {
+  assert(Universe::heap()->is_in(discovered_field_addr), PTR_FORMAT " not in heap", p2i(discovered_field_addr));
+  HeapAccess<AS_NO_KEEPALIVE>::oop_store(discovered_field_addr,
+                                         value);
 }

 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
@@ -255,9 +256,8 @@ void DiscoveredListIterator::remove() {
   } else {
     new_next = _next_discovered;
   }
-  // Remove Reference object from discovered list. Note that G1 does not need a
-  // pre-barrier here because we know the Reference has already been found/marked,
-  // that's how it ended up in the discovered list in the first place.
+  // Remove Reference object from discovered list. We do not need barriers here,
+  // as we only remove. We will do the barrier when we actually advance the cursor.
   RawAccess<>::oop_store(_prev_discovered_addr, new_next);
   _removed++;
   _refs_list.dec_length(1);
@@ -277,7 +277,11 @@ void DiscoveredListIterator::clear_referent() {
 }

 void DiscoveredListIterator::enqueue() {
-  _enqueue->enqueue(_current_discovered, _next_discovered);
+  if (_prev_discovered_addr != _refs_list.adr_head()) {
+    _enqueue->enqueue(_prev_discovered_addr, _current_discovered);
+  } else {
+    RawAccess<>::oop_store(_prev_discovered_addr, _current_discovered);
+  }
 }

 void DiscoveredListIterator::complete_enqueue() {
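The reworked enqueue() applies the barrier at the moment the cursor advances, and distinguishes a real discovered field inside the previous Reference from the list head, which is a native root outside the heap and needs only a raw store. A minimal model of that split (standalone; link_current and barriered_store are invented names):

    #include <cassert>

    struct Reference {
      Reference* discovered = nullptr;
    };

    // Store used for discovered fields that live inside heap objects; the real
    // closure would also apply the collector's post-barrier here.
    void barriered_store(Reference** discovered_field_addr, Reference* value) {
      *discovered_field_addr = value;
    }

    // Model of the new DiscoveredListIterator::enqueue(): the list head is a
    // native root, so a raw store suffices; an in-object field takes the barrier.
    void link_current(Reference** prev_discovered_addr, Reference** list_head_addr,
                      Reference* current) {
      if (prev_discovered_addr != list_head_addr) {
        barriered_store(prev_discovered_addr, current);
      } else {
        *prev_discovered_addr = current;  // raw store into the native list head
      }
    }

    int main() {
      Reference a, b;
      Reference* head = nullptr;
      link_current(&head, &head, &a);           // head slot: raw store
      link_current(&a.discovered, &head, &b);   // field inside 'a': barriered
      assert(head == &a && a.discovered == &b);
      return 0;
    }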
@ -286,7 +290,7 @@ void DiscoveredListIterator::complete_enqueue() {
|
||||
// Swap refs_list into pending list and set obj's
|
||||
// discovered to what we read from the pending list.
|
||||
oop old = Universe::swap_reference_pending_list(_refs_list.head());
|
||||
_enqueue->enqueue(_prev_discovered, old);
|
||||
_enqueue->enqueue(java_lang_ref_Reference::discovered_addr_raw(_prev_discovered), old);
|
||||
}
|
||||
}
|
||||
|
||||
|
src/hotspot/share/gc/shared/referenceProcessor.hpp

@@ -47,15 +47,16 @@ class RefProcProxyTask;
 // at the point of invocation.
 class EnqueueDiscoveredFieldClosure {
 public:
-  // For the given j.l.ref.Reference reference, set the discovered field to value.
-  virtual void enqueue(oop reference, oop value) = 0;
+  // For the given j.l.ref.Reference discovered field address, set the discovered
+  // field to value and apply any barriers to it.
+  virtual void enqueue(HeapWord* discovered_field_addr, oop value) = 0;
 };

 // EnqueueDiscoveredFieldClosure that executes the default barrier on the discovered
-// field of the j.l.ref.Reference reference with the given value.
+// field of the j.l.ref.Reference with the given value.
 class BarrierEnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
 public:
-  void enqueue(oop reference, oop value) override;
+  void enqueue(HeapWord* discovered_field_addr, oop value) override;
 };

 // List of discovered references.
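Passing the field address instead of the Reference also composes with complete_enqueue() above, which now resolves the address once via java_lang_ref_Reference::discovered_addr_raw. A hedged sketch of the reshaped contract from the implementer's side (standalone; LoggingEnqueue and the stand-in types are invented for illustration):

    #include <cstdio>

    using HeapWordStandin = void*;   // stand-in for HotSpot's HeapWord*
    struct Obj;                      // opaque oop stand-in

    // Shape of the new contract: implementations receive the discovered field's
    // address and are responsible for the store plus any barriers on it.
    class EnqueueDiscoveredFieldClosureModel {
    public:
      virtual void enqueue(HeapWordStandin discovered_field_addr, Obj* value) = 0;
      virtual ~EnqueueDiscoveredFieldClosureModel() = default;
    };

    // Invented implementation: the point is that any collector can hang its own
    // barrier policy behind the same address-based call, which is what the
    // G1-specific closure in g1YoungCollector.cpp does during a pause.
    class LoggingEnqueue : public EnqueueDiscoveredFieldClosureModel {
    public:
      void enqueue(HeapWordStandin discovered_field_addr, Obj* value) override {
        *static_cast<Obj**>(discovered_field_addr) = value;  // plain store
        std::printf("stored discovered field at %p\n", discovered_field_addr);
      }
    };

    int main() {
      Obj* slot = nullptr;
      LoggingEnqueue cl;
      cl.enqueue(&slot, nullptr);
      return 0;
    }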