6989448: G1: refactor and simplify G1ParScanThreadState
Reviewed-by: iveresov, tonyp
parent 02bad20969
commit d85b042fca

hotspot/src/share/vm/gc_implementation/g1
@@ -3845,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
                undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
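Note: the new verify_ref() overloads assert two queue-entry conventions that are easy to miss: entries referring to a partially scanned object array carry a low-bit "partial array" mask, and everything else must point into the G1 reserved heap. Below is a standalone sketch of the masking convention, not HotSpot code; the names mirror has_partial_array_mask()/clear_partial_array_mask(), but the types and the mask value are simplified stand-ins.

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t oop;  // stand-in for HotSpot's oop

const uintptr_t GC_MASK = 1;  // low bit tags a "partial array" entry

inline bool has_partial_array_mask(oop* ref) {
  return (reinterpret_cast<uintptr_t>(ref) & GC_MASK) != 0;
}
inline oop* set_partial_array_mask(oop* ref) {
  return reinterpret_cast<oop*>(reinterpret_cast<uintptr_t>(ref) | GC_MASK);
}
inline oop* clear_partial_array_mask(oop* ref) {
  // In HotSpot this recovers the object; here it just strips the tag.
  return reinterpret_cast<oop*>(reinterpret_cast<uintptr_t>(ref) & ~GC_MASK);
}

int main() {
  oop obj = 42;
  oop* plain = &obj;
  assert(!has_partial_array_mask(plain));       // ordinary reference

  oop* masked = set_partial_array_mask(plain);  // "resume scanning this array"
  assert(has_partial_array_mask(masked));
  assert(clear_partial_array_mask(masked) == plain);

  printf("partial-array mask round-trip ok\n");
  return 0;
}

This is also why verify_ref(narrowOop*) can assert !has_partial_array_mask(ref): partial-array entries are only ever pushed as full-width oop*, so a narrow entry must never carry the tag.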
@@ -4047,39 +4095,40 @@ public:
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
-      pss->trim_queue();
+  void do_void();
 
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
-    }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
+private:
+  inline bool offer_termination();
 };
 
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->push_on_queue((narrowOop*) stolen_task);
+      } else {
+        pss->push_on_queue((oop*) stolen_task);
+      }
+      pss->trim_queue();
+    }
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
+
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
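Note: the rewritten do_void() is the standard drain-steal-terminate shape: fully drain your own queue, steal while anything is stealable (re-queueing and draining each stolen task), and only offer termination once stealing fails; a failed offer loops back to stealing. The sketch below is a minimal single-threaded stand-in, not HotSpot's classes; steal() and offer_termination() here are trivial placeholders for the real task-queue steal and the ParallelTaskTerminator protocol.

#include <deque>
#include <cstdio>

struct Worker {
  std::deque<int> local;    // stand-in for the worker's task queue
  std::deque<int>* victim;  // the queue this worker may steal from

  void trim_queue() {       // drain everything we currently own
    while (!local.empty()) { local.pop_back(); }
  }
  bool steal(int& task) {   // take one task from the victim, if any
    if (victim->empty()) return false;
    task = victim->front();
    victim->pop_front();
    return true;
  }
  bool offer_termination() {  // placeholder: succeed once nothing is left;
    return victim->empty();   // the real protocol coordinates all workers
  }

  void do_void() {
    int task;
    trim_queue();
    do {
      while (steal(task)) {
        local.push_back(task);  // push_on_queue(stolen_task)
        trim_queue();           // process it, plus any tasks it spawns
      }
    } while (!offer_termination());
  }
};

int main() {
  std::deque<int> other = {1, 2, 3};
  Worker w = {std::deque<int>(), &other};
  w.do_void();
  printf("victim left: %zu, local left: %zu\n", other.size(), w.local.size());
  return 0;
}

Splitting offer_termination() out also puts the start_term_time()/end_term_time() pair in exactly one place, instead of the three call sites the old inline loop needed.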
@@ -4177,8 +4226,7 @@ public:
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
@@ -1651,49 +1651,17 @@ public:
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const         { return _undo_waste; }
 
-  template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
 #ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
     refs()->push(ref);
   }
 
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
-
 
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
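Note: the simplified push_on_queue() uses the debug-only verifier idiom: verify_ref() returns bool and is invoked only inside assert(), so the check (and the call) disappears entirely in product builds, while debug builds fail with a precise message produced inside the verifier. A minimal sketch of the idiom using the standard one-argument assert (HotSpot's assert macro takes the message as a second argument):

#include <cassert>
#include <cstdio>

static bool verify_positive(int v) {
  // A debug build can print context before the assert fires;
  // returning true is what lets the call sit inside assert().
  if (v <= 0) {
    fprintf(stderr, "verify_positive failed: v=%d\n", v);
    return false;
  }
  return true;
}

static void push(int v) {
  assert(verify_positive(v) && "sanity");  // compiled out under NDEBUG
  // ... the actual push goes here ...
}

int main() {
  push(1);  // passes the verifier
  return 0;
}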
@@ -1818,59 +1786,15 @@ private:
     }
   }
 
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
-    }
-  }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
+    }
+  }
+
+public:
+  void trim_queue();
 };
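Note: deal_with_reference(StarTask) dispatches on is_narrow() because a StarTask packs either an oop* or a narrowOop* into a single word. A simplified stand-in for that tagged union is sketched below (HotSpot's real StarTask lives in taskqueue.hpp; the tag choice here is illustrative only):

#include <cstdint>
#include <cstdio>

typedef uint64_t oop;        // stand-ins for HotSpot's reference types
typedef uint32_t narrowOop;

class StarTask {
  void* _holder;             // tagged pointer: low bit set => narrowOop*
  enum { NARROW_MASK = 1 };
public:
  StarTask() : _holder(0) {}
  StarTask(oop* p) : _holder(p) {}
  StarTask(narrowOop* p)
    : _holder(reinterpret_cast<void*>(
          reinterpret_cast<uintptr_t>(p) | NARROW_MASK)) {}

  bool is_narrow() const {
    return (reinterpret_cast<uintptr_t>(_holder) & NARROW_MASK) != 0;
  }
  operator oop*() const { return static_cast<oop*>(_holder); }
  operator narrowOop*() const {
    return reinterpret_cast<narrowOop*>(
        reinterpret_cast<uintptr_t>(_holder) & ~uintptr_t(NARROW_MASK));
  }
};

static void deal_with_reference(StarTask ref) {
  if (ref.is_narrow()) {
    printf("narrow ref -> %u\n", *(narrowOop*)ref);
  } else {
    printf("full ref   -> %llu\n", (unsigned long long)*(oop*)ref);
  }
}

int main() {
  oop o = 7;
  narrowOop n = 3;
  deal_with_reference(StarTask(&o));  // dispatches to the oop* path
  deal_with_reference(StarTask(&n));  // dispatches to the narrowOop* path
  return 0;
}

The narrowOop*/oop* overloads that deal_with_reference() forwards to exist for the same reason: the static pointer type has to be re-established before the reference can be dereferenced.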