8035330: Remove G1ParScanPartialArrayClosure and G1ParScanHeapEvacClosure
The closures mentioned in the title are really methods wrapped in closure classes. This confuses readers and, because G1ParScanHeapEvacClosure is instantiated into the specialized oop_oop_iterate() methods, also increases code size. Move the logic into G1ParScanThreadState as methods.

Reviewed-by: stefank
This commit is contained in:
parent 375e6df9ee
commit 483ea400a5
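The heart of the change is a pattern swap: per-worker scanning logic that lived in thin closure classes, each of which only forwarded into G1ParScanThreadState, now lives directly on that state object. A minimal sketch of the pattern being removed (invented names, not the HotSpot sources):

// Sketch only: invented names, not HotSpot code.
struct ScanThreadState;

// Before: a "closure" whose only job is to forward into the per-worker state.
// Call sites must construct it and register it with the state via a setter.
struct PartialArrayClosure {
  ScanThreadState* _state;
  explicit PartialArrayClosure(ScanThreadState* s) : _state(s) {}
  void do_oop(void** p);  // body just delegates to _state
};

// After: the logic is a plain method on the per-worker state. No wiring,
// one less indirection, and nothing extra stamped out per closure type.
struct ScanThreadState {
  void do_oop_partial_array(void** p) { /* chunking logic on members */ (void)p; }
};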
@@ -4632,9 +4632,7 @@ bool G1ParScanThreadState::verify_task(StarTask ref) const {
 #endif // ASSERT

 void G1ParScanThreadState::trim_queue() {
-  assert(_evac_cl != NULL, "not set");
   assert(_evac_failure_cl != NULL, "not set");
-  assert(_partial_scan_cl != NULL, "not set");

   StarTask ref;
   do {
@@ -4830,55 +4828,6 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);

-template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start = next_index;
-  int end = length;
-  int remainder = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    _par_scan_state->push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters. It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap* _g1h;
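The removed do_oop_nv() above (re-added below as G1ParScanThreadState::do_oop_partial_array()) chunks large object arrays so that other workers can steal the unscanned tail, using the to-space length field as the next start index. A freestanding sketch of just the chunking arithmetic, assuming a fixed chunk size in place of the ParGCArrayScanChunk flag:

// Illustrative sketch, not HotSpot code; kChunk stands in for ParGCArrayScanChunk.
#include <cstdio>
#include <vector>

const int kChunk = 4;

void scan_range(int start, int end) { std::printf("scan [%d,%d)\n", start, end); }

// Each task on the work queue is modeled as the next start index.
void process_partial(std::vector<int>& work, int next_index, int length) {
  int start = next_index;
  int end = length;
  // Don't split off a range smaller than kChunk: only push a remainder
  // when more than 2 * kChunk elements are left.
  if (end - start > 2 * kChunk) {
    end = start + kChunk;
    work.push_back(end);  // publish the remainder so another worker can steal it
  }
  scan_range(start, end);
}

int main() {
  std::vector<int> work{0};  // initial task: start at index 0
  const int length = 14;
  while (!work.empty()) {
    int next = work.back(); work.pop_back();
    process_partial(work, next, length);
  }
  // Scans [0,4), [4,8), [8,14): the final range keeps the tail whole.
}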
@@ -5020,13 +4969,9 @@ public:
     ReferenceProcessor*             rp = _g1h->ref_processor_stw();

     G1ParScanThreadState            pss(_g1h, worker_id, rp);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);

-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);

     G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
     G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
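The same boilerplate disappears at this call site and at the three similar ones below; after the patch, only the evacuation-failure closure still has to be wired up externally. A compilable toy version of the wiring that remains (invented types, not HotSpot's):

struct EvacFailureClosure;

struct ScanThreadState {
  EvacFailureClosure* _evac_failure_cl = nullptr;
  void set_evac_failure_closure(EvacFailureClosure* cl) { _evac_failure_cl = cl; }
};

struct EvacFailureClosure {
  ScanThreadState* _state;
  explicit EvacFailureClosure(ScanThreadState* s) : _state(s) {}
};

int main() {
  ScanThreadState pss;                             // one per GC worker
  EvacFailureClosure evac_failure_cl(&pss);
  pss.set_evac_failure_closure(&evac_failure_cl);  // the only wiring left
}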
@@ -5474,14 +5419,9 @@ public:
     G1STWIsAliveClosure is_alive(_g1h);

     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
-
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);

-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);

     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
@@ -5586,13 +5526,9 @@ public:
     HandleMark hm;

     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);

-    pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
-    pss.set_partial_scan_closure(&partial_scan_cl);

     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");

@@ -5716,13 +5652,9 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered
   // reference objects.
-  G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
-  G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);

-  pss.set_evac_closure(&scan_evac_cl);
   pss.set_evac_failure_closure(&evac_failure_cl);
-  pss.set_partial_scan_closure(&partial_scan_cl);

   assert(pss.refs()->is_empty(), "pre-condition");

@@ -1779,8 +1779,6 @@ protected:
   size_t _undo_waste;

   OopsInHeapRegionClosure*      _evac_failure_cl;
-  G1ParScanHeapEvacClosure*     _evac_cl;
-  G1ParScanPartialArrayClosure* _partial_scan_cl;

   int  _hash_seed;
   uint _queue_num;
@@ -1908,14 +1906,6 @@ public:
     return _evac_failure_cl;
   }

-  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
-    _evac_cl = evac_cl;
-  }
-
-  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
-    _partial_scan_cl = partial_scan_cl;
-  }
-
   int* hash_seed() { return &_hash_seed; }
   uint queue_num() { return _queue_num; }

@@ -1963,19 +1953,121 @@ public:
                                  false /* retain */);
     }
   }
+private:
+#define G1_PARTIAL_ARRAY_MASK 0x2
+
+  inline bool has_partial_array_mask(oop* ref) const {
+    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+  }
+
+  // We never encode partial array oops as narrowOop*, so return false immediately.
+  // This allows the compiler to create optimized code when popping references from
+  // the work queue.
+  inline bool has_partial_array_mask(narrowOop* ref) const {
+    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+    return false;
+  }
+
+  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+  // We always encode partial arrays as regular oop, to allow the
+  // specialization for has_partial_array_mask() for narrowOops above.
+  // This means that unintentional use of this method with narrowOops are caught
+  // by the compiler.
+  inline oop* set_partial_array_mask(oop obj) const {
+    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline oop clear_partial_array_mask(oop* ref) const {
+    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  }
+
+  void do_oop_partial_array(oop* p) {
+    assert(has_partial_array_mask(p), "invariant");
+    oop from_obj = clear_partial_array_mask(p);
+
+    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+    assert(from_obj->is_objArray(), "must be obj array");
+    objArrayOop from_obj_array = objArrayOop(from_obj);
+    // The from-space object contains the real length.
+    int length = from_obj_array->length();
+
+    assert(from_obj->is_forwarded(), "must be forwarded");
+    oop to_obj = from_obj->forwardee();
+    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+    objArrayOop to_obj_array = objArrayOop(to_obj);
+    // We keep track of the next start index in the length field of the
+    // to-space object.
+    int next_index = to_obj_array->length();
+    assert(0 <= next_index && next_index < length,
+           err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+    int start = next_index;
+    int end = length;
+    int remainder = end - start;
+    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+    if (remainder > 2 * ParGCArrayScanChunk) {
+      end = start + ParGCArrayScanChunk;
+      to_obj_array->set_length(end);
+      // Push the remainder before we process the range in case another
+      // worker has run out of things to do and can steal it.
+      oop* from_obj_p = set_partial_array_mask(from_obj);
+      push_on_queue(from_obj_p);
+    } else {
+      assert(length == end, "sanity");
+      // We'll process the final range for this object. Restore the length
+      // so that the heap remains parsable in case of evacuation failure.
+      to_obj_array->set_length(end);
+    }
+    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+    // Process indexes [start,end). It will also process the header
+    // along with the first chunk (i.e., the chunk with start == 0).
+    // Note that at this point the length field of to_obj_array is not
+    // correct given that we are using it to keep track of the next
+    // start index. oop_iterate_range() (thankfully!) ignores the length
+    // field and only relies on the start / end parameters. It does
+    // however return the size of the object which will be incorrect. So
+    // we have to ignore it even if we wanted to use it.
+    to_obj_array->oop_iterate_range(&_scanner, start, end);
+  }
+
+  // This method is applied to the fields of the objects that have just been copied.
+  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
+    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+           "Reference should not be NULL here as such are never pushed to the task queue.");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+    // Although we never intentionally push references outside of the collection
+    // set, due to (benign) races in the claim mechanism during RSet scanning more
+    // than one thread might claim the same card. So the same card may be
+    // processed multiple times. So redo this check.
+    if (_g1h->in_cset_fast_test(obj)) {
+      oop forwardee;
+      if (obj->is_forwarded()) {
+        forwardee = obj->forwardee();
+      } else {
+        forwardee = copy_to_survivor_space(obj);
+      }
+      assert(forwardee != NULL, "forwardee should not be NULL");
+      oopDesc::encode_store_heap_oop(p, forwardee);
+    }
+
+    assert(obj != NULL, "Must be");
+    update_rs(from, p, queue_num());
+  }
+public:
+
   oop copy_to_survivor_space(oop const obj);

   template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (has_partial_array_mask(ref_to_scan)) {
-      _partial_scan_cl->do_oop_nv(ref_to_scan);
-    } else {
+    if (!has_partial_array_mask(ref_to_scan)) {
       // Note: we can use "raw" versions of "region_containing" because
       // "obj_to_scan" is definitely in the heap, and is not in a
       // humongous region.
       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      _evac_cl->set_region(r);
-      _evac_cl->do_oop_nv(ref_to_scan);
+      do_oop_evac(ref_to_scan, r);
+    } else {
+      do_oop_partial_array((oop*)ref_to_scan);
     }
   }

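The mask helpers added above rely on low-bit pointer tagging: heap addresses are aligned, so bit 0x2 is free to mark a task-queue entry as a partial-array task. Because such entries are only ever pushed as oop*, the narrowOop* overload of has_partial_array_mask() can return false unconditionally, letting the compiler drop that branch when popping references. A freestanding sketch of the tagging itself (plain C++, illustrative names):

// Illustrative sketch of low-bit pointer tagging, not HotSpot code.
#include <cassert>
#include <cstdint>

const uintptr_t kPartialArrayMask = 0x2;

void* set_mask(void* obj) {
  // Alignment guarantees the tag bit is free in a real object address.
  assert(((uintptr_t)obj & kPartialArrayMask) == 0 && "information loss");
  return (void*)((uintptr_t)obj | kPartialArrayMask);
}

bool has_mask(void* ref) {
  return ((uintptr_t)ref & kPartialArrayMask) != 0;
}

void* clear_mask(void* ref) {
  return (void*)((uintptr_t)ref & ~kPartialArrayMask);
}

int main() {
  alignas(8) static int dummy;   // stands in for a heap object
  void* tagged = set_mask(&dummy);
  assert(has_mask(tagged) && clear_mask(tagged) == &dummy);
}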
@@ -80,53 +80,6 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };

-#define G1_PARTIAL_ARRAY_MASK 0x2
-
-inline bool has_partial_array_mask(oop* ref) {
-  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-}
-
-// We never encode partial array oops as narrowOop*, so return false immediately.
-// This allows the compiler to create optimized code when popping references from
-// the work queue.
-inline bool has_partial_array_mask(narrowOop* ref) {
-  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-  return false;
-}
-
-// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-// We always encode partial arrays as regular oop, to allow the
-// specialization for has_partial_array_mask() for narrowOops above.
-// This means that unintentional use of this method with narrowOops are caught
-// by the compiler.
-inline oop* set_partial_array_mask(oop obj) {
-  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-}
-
-template <class T> inline oop clear_partial_array_mask(T* ref) {
-  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-}
-
-class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
-  G1ParScanClosure _scanner;
-
-public:
-  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
-    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
-  {
-    assert(_ref_processor == NULL, "sanity");
-  }
-
-  G1ParScanClosure* scanner() {
-    return &_scanner;
-  }
-
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-};
-
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
 protected:
@@ -173,15 +126,8 @@ typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
 typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

-// The following closure type is defined in g1_specialized_oop_closures.hpp:
-//
-// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
 // We use a separate closure to handle references during evacuation
 // failure processing.
-// We could have used another instance of G1ParScanHeapEvacClosure
-// (since that closure no longer assumes that the references it
-// handles point into the collection set).

 typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

@@ -43,8 +43,6 @@ class G1ParCopyClosure;
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;

-typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
 class G1CMOopClosure;
@@ -61,7 +59,6 @@ class G1UpdateRSOrPushRefOopClosure;
 #endif

 #define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
-      f(G1ParScanHeapEvacClosure,_nv) \
       f(G1ParScanClosure,_nv) \
       f(G1ParPushHeapRSClosure,_nv) \
       f(FilterIntoCSClosure,_nv) \
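Dropping f(G1ParScanHeapEvacClosure,_nv) from this macro is where the code-size saving comes from: every closure listed here gets a dedicated, statically dispatched copy of each oop_oop_iterate() variant stamped out for it. A toy illustration of the "_nv" (non-virtual) dispatch technique (names invented, not HotSpot's):

// Sketch of specialized vs. generic closure dispatch, not HotSpot code.
#include <cstdio>

struct OopClosure {
  virtual void do_oop(long* p) = 0;
};

struct CountClosure : OopClosure {
  int count = 0;
  void do_oop_nv(long* /*p*/) { ++count; }                 // statically bound
  virtual void do_oop(long* p) override { do_oop_nv(p); }  // virtual wrapper
};

// Generic path: one virtual call per element.
void iterate(long* base, int len, OopClosure* cl) {
  for (int i = 0; i < len; i++) cl->do_oop(&base[i]);
}

// Specialized path, generated once per registered closure type:
// the call binds statically and can be inlined.
template <class ClosureType>
void iterate_nv(long* base, int len, ClosureType* cl) {
  for (int i = 0; i < len; i++) cl->do_oop_nv(&base[i]);
}

int main() {
  long fields[3] = {};
  CountClosure cl;
  iterate_nv(fields, 3, &cl);     // what the specialization macro buys
  std::printf("%d\n", cl.count);  // prints 3
}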