8244684: G1 abuses StarTask to also include partial objarray scan tasks
New ScannerTask and PartialArrayScanTask, initially used by G1

Reviewed-by: tschatzl, sjohanss
Parent: 5b6f81de07
Commit: 1856ff8913
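Background for reading the diff below: the change retires the old trick of smuggling partial-objarray work into StarTask by setting a low bit on an oop* (the G1_PARTIAL_ARRAY_MASK scheme removed in g1ParScanThreadState.hpp), and instead queues a ScannerTask, a discriminated union over oop*, narrowOop*, and PartialArrayScanTask that keeps a two-bit tag in the low bits of the stored pointer (see the final hunk, guarded by SHARE_GC_SHARED_TASKQUEUE_HPP). A minimal standalone sketch of that low-tag encoding follows; the names are hypothetical and this is not the HotSpot code, but it shows why the trick works: the encoded pointers are at least 4-byte aligned, so the two low bits are free to carry a tag.

// Minimal illustration of low-tag pointer encoding (hypothetical names,
// not the HotSpot implementation).
#include <cassert>
#include <cstdint>
#include <cstdio>

enum Tag : uintptr_t { OopTag = 0, NarrowOopTag = 1, PartialArrayTag = 2 };

static void* encode(void* p, uintptr_t tag) {
  // The pointer must be at least 4-byte aligned so the low two bits are free.
  assert((reinterpret_cast<uintptr_t>(p) & 0x3) == 0 && "pointer must be 4-byte aligned");
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) | tag);
}

static void* decode(void* p, uintptr_t tag) {
  assert((reinterpret_cast<uintptr_t>(p) & 0x3) == tag && "tag mismatch");
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) & ~uintptr_t(0x3));
}

int main() {
  int slot = 42;                             // stands in for an oop* / narrowOop* target
  void* task = encode(&slot, NarrowOopTag);  // the tag travels with the pointer in the queue
  int* back = static_cast<int*>(decode(task, NarrowOopTag));
  std::printf("%d\n", *back);                // prints 42
  return 0;
}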
@@ -1542,12 +1542,12 @@ G1CollectedHeap::G1CollectedHeap() :
   _filler_array_max_size = _humongous_object_threshold_in_words;
 
   uint n_queues = ParallelGCThreads;
-  _task_queues = new RefToScanQueueSet(n_queues);
+  _task_queues = new ScannerTasksQueueSet(n_queues);
 
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
   for (uint i = 0; i < n_queues; i++) {
-    RefToScanQueue* q = new RefToScanQueue();
+    ScannerTasksQueue* q = new ScannerTasksQueue();
     q->initialize();
     _task_queues->register_queue(i, q);
     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
@@ -3399,7 +3399,7 @@ public:
       // When the queue is drained (after each phase of reference processing)
       // the object and it's followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
-      _par_scan_state->push_on_queue(p);
+      _par_scan_state->push_on_queue(ScannerTask(p));
     }
   }
 };
@@ -3436,14 +3436,14 @@ class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _pss;
-  RefToScanQueueSet* _queues;
+  ScannerTasksQueueSet* _queues;
   WorkGang* _workers;
 
 public:
   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
                            G1ParScanThreadStateSet* per_thread_states,
                            WorkGang* workers,
-                           RefToScanQueueSet *task_queues) :
+                           ScannerTasksQueueSet *task_queues) :
     _g1h(g1h),
     _pss(per_thread_states),
     _queues(task_queues),
@@ -3463,14 +3463,14 @@ class G1STWRefProcTaskProxy: public AbstractGangTask {
   ProcessTask& _proc_task;
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _pss;
-  RefToScanQueueSet* _task_queues;
+  ScannerTasksQueueSet* _task_queues;
   TaskTerminator* _terminator;
 
 public:
   G1STWRefProcTaskProxy(ProcessTask& proc_task,
                         G1CollectedHeap* g1h,
                         G1ParScanThreadStateSet* per_thread_states,
-                        RefToScanQueueSet *task_queues,
+                        ScannerTasksQueueSet *task_queues,
                         TaskTerminator* terminator) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
@@ -3801,7 +3801,7 @@ class G1EvacuateRegionsBaseTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _per_thread_states;
-  RefToScanQueueSet* _task_queues;
+  ScannerTasksQueueSet* _task_queues;
   TaskTerminator _terminator;
   uint _num_workers;
 
@@ -3839,7 +3839,10 @@ protected:
   virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
 
 public:
-  G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
+  G1EvacuateRegionsBaseTask(const char* name,
+                            G1ParScanThreadStateSet* per_thread_states,
+                            ScannerTasksQueueSet* task_queues,
+                            uint num_workers) :
     AbstractGangTask(name),
     _g1h(G1CollectedHeap::heap()),
     _per_thread_states(per_thread_states),
@@ -3890,7 +3893,7 @@ class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
 public:
   G1EvacuateRegionsTask(G1CollectedHeap* g1h,
                         G1ParScanThreadStateSet* per_thread_states,
-                        RefToScanQueueSet* task_queues,
+                        ScannerTasksQueueSet* task_queues,
                         G1RootProcessor* root_processor,
                         uint num_workers) :
     G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
@@ -3938,7 +3941,7 @@ class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
 
 public:
   G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
-                                RefToScanQueueSet* queues,
+                                ScannerTasksQueueSet* queues,
                                 uint num_workers) :
     G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
   }
@@ -54,6 +54,7 @@
 #include "gc/shared/plab.hpp"
 #include "gc/shared/preservedMarks.hpp"
 #include "gc/shared/softRefPolicy.hpp"
+#include "gc/shared/taskqueue.hpp"
 #include "memory/memRegion.hpp"
 #include "utilities/stack.hpp"
 
@@ -97,8 +98,8 @@ class G1HeapSizingPolicy;
 class G1HeapSummary;
 class G1EvacSummary;
 
-typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
-typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
+typedef OverflowTaskQueue<ScannerTask, mtGC> ScannerTasksQueue;
+typedef GenericTaskQueueSet<ScannerTasksQueue, mtGC> ScannerTasksQueueSet;
 
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
@@ -814,7 +815,7 @@ public:
   G1ConcurrentRefine* _cr;
 
   // The parallel task queues
-  RefToScanQueueSet *_task_queues;
+  ScannerTasksQueueSet *_task_queues;
 
   // True iff a evacuation has failed in the current collection.
   bool _evacuation_failed;
@@ -951,7 +952,7 @@ public:
  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 public:
 
-  RefToScanQueue *task_queue(uint i) const;
+  ScannerTasksQueue* task_queue(uint i) const;
 
   uint num_task_queues() const;
 
@@ -1478,18 +1479,18 @@ private:
 protected:
   G1CollectedHeap* _g1h;
   G1ParScanThreadState* _par_scan_state;
-  RefToScanQueueSet* _queues;
+  ScannerTasksQueueSet* _queues;
   TaskTerminator* _terminator;
   G1GCPhaseTimes::GCParPhases _phase;
 
   G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
-  RefToScanQueueSet* queues() { return _queues; }
+  ScannerTasksQueueSet* queues() { return _queues; }
   TaskTerminator* terminator() { return _terminator; }
 
 public:
   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                 G1ParScanThreadState* par_scan_state,
-                                RefToScanQueueSet* queues,
+                                ScannerTasksQueueSet* queues,
                                 TaskTerminator* terminator,
                                 G1GCPhaseTimes::GCParPhases phase)
     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,7 +139,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   card_table()->g1_mark_as_young(mr);
 }
 
-inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
+inline ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
   return _task_queues->queue(i);
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
          obj->forwardee() == RawAccess<>::oop_load(p)),
          "p should still be pointing to obj or to its forwardee");
 
-  _par_scan_state->push_on_queue(p);
+  _par_scan_state->push_on_queue(ScannerTask(p));
 }
 
 template <class T>
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                            size_t young_cset_length,
                                            size_t optional_cset_length)
   : _g1h(g1h),
-    _refs(g1h->task_queue(worker_id)),
+    _task_queue(g1h->task_queue(worker_id)),
     _rdcq(rdcqs),
     _ct(g1h->card_table()),
     _closures(NULL),
@@ -119,46 +119,45 @@ size_t G1ParScanThreadState::lab_undo_waste_words() const {
 }
 
 #ifdef ASSERT
-bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
-  assert(ref != NULL, "invariant");
+void G1ParScanThreadState::verify_task(narrowOop* task) const {
+  assert(task != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
-  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
-  oop p = RawAccess<>::oop_load(ref);
+  oop p = RawAccess<>::oop_load(task);
   assert(_g1h->is_in_g1_reserved(p),
-         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
-  return true;
+         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
 }
 
-bool G1ParScanThreadState::verify_ref(oop* ref) const {
-  assert(ref != NULL, "invariant");
-  if (has_partial_array_mask(ref)) {
-    // Must be in the collection set--it's already been copied.
-    oop p = clear_partial_array_mask(ref);
-    assert(_g1h->is_in_cset(p),
-           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
-  } else {
-    oop p = RawAccess<>::oop_load(ref);
-    assert(_g1h->is_in_g1_reserved(p),
-           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
-  }
-  return true;
+void G1ParScanThreadState::verify_task(oop* task) const {
+  assert(task != NULL, "invariant");
+  oop p = RawAccess<>::oop_load(task);
+  assert(_g1h->is_in_g1_reserved(p),
+         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
 }
 
-bool G1ParScanThreadState::verify_task(StarTask ref) const {
-  if (ref.is_narrow()) {
-    return verify_ref((narrowOop*) ref);
+void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
+  // Must be in the collection set--it's already been copied.
+  oop p = task.to_source_array();
+  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
+}
+
+void G1ParScanThreadState::verify_task(ScannerTask task) const {
+  if (task.is_narrow_oop_ptr()) {
+    verify_task(task.to_narrow_oop_ptr());
+  } else if (task.is_oop_ptr()) {
+    verify_task(task.to_oop_ptr());
+  } else if (task.is_partial_array_task()) {
+    verify_task(task.to_partial_array_task());
   } else {
-    return verify_ref((oop*) ref);
+    ShouldNotReachHere();
   }
 }
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
-  StarTask ref;
   do {
     // Fully drain the queue.
     trim_queue_to_threshold(0);
-  } while (!_refs->is_empty());
+  } while (!_task_queue->is_empty());
 }
 
 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
@@ -330,8 +329,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
     // the to-space object. The actual length can be found in the
     // length field of the from-space object.
     arrayOop(obj)->set_length(0);
-    oop* old_p = set_partial_array_mask(old);
-    do_oop_partial_array(old_p);
+    do_partial_array(PartialArrayScanTask(old));
   } else {
     G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
     obj->oop_iterate_backwards(&_scanner);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/ageTable.hpp"
+#include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 #include "utilities/ticks.hpp"
@@ -45,7 +46,7 @@ class outputStream;
 
 class G1ParScanThreadState : public CHeapObj<mtGC> {
   G1CollectedHeap* _g1h;
-  RefToScanQueue* _refs;
+  ScannerTasksQueue* _task_queue;
   G1RedirtyCardsQueue _rdcq;
   G1CardTable* _ct;
   G1EvacuationRootClosures* _closures;
@@ -114,15 +115,15 @@ public:
   void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
 
 #ifdef ASSERT
-  bool queue_is_empty() const { return _refs->is_empty(); }
+  bool queue_is_empty() const { return _task_queue->is_empty(); }
+#endif
 
-  bool verify_ref(narrowOop* ref) const;
-  bool verify_ref(oop* ref) const;
-  bool verify_task(StarTask ref) const;
-#endif // ASSERT
+  void verify_task(narrowOop* task) const NOT_DEBUG_RETURN;
+  void verify_task(oop* task) const NOT_DEBUG_RETURN;
+  void verify_task(PartialArrayScanTask task) const NOT_DEBUG_RETURN;
+  void verify_task(ScannerTask task) const NOT_DEBUG_RETURN;
 
   template <class T> void do_oop_ext(T* ref);
-  template <class T> void push_on_queue(T* ref);
+  void push_on_queue(ScannerTask task);
 
   template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
     assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
@@ -158,43 +159,12 @@ public:
   size_t flush(size_t* surviving_young_words);
 
 private:
-  #define G1_PARTIAL_ARRAY_MASK 0x2
-
-  inline bool has_partial_array_mask(oop* ref) const {
-    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-  }
-
-  // We never encode partial array oops as narrowOop*, so return false immediately.
-  // This allows the compiler to create optimized code when popping references from
-  // the work queue.
-  inline bool has_partial_array_mask(narrowOop* ref) const {
-    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-    return false;
-  }
-
-  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-  // We always encode partial arrays as regular oop, to allow the
-  // specialization for has_partial_array_mask() for narrowOops above.
-  // This means that unintentional use of this method with narrowOops are caught
-  // by the compiler.
-  inline oop* set_partial_array_mask(oop obj) const {
-    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline oop clear_partial_array_mask(oop* ref) const {
-    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline void do_oop_partial_array(oop* p);
+  inline void do_partial_array(PartialArrayScanTask task);
 
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> inline void do_oop_evac(T* p);
 
-  inline void deal_with_reference(oop* ref_to_scan);
-  inline void deal_with_reference(narrowOop* ref_to_scan);
-
-  inline void dispatch_reference(StarTask ref);
+  inline void dispatch_task(ScannerTask task);
 
   // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
   // allocate into dest. Previous_plab_refill_failed indicates whether previous
@@ -232,7 +202,7 @@ public:
   Tickspan trim_ticks() const;
   void reset_trim_ticks();
 
-  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
+  inline void steal_and_trim_queue(ScannerTasksQueueSet *task_queues);
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
   oop handle_evacuation_failure_par(oop obj, markWord m);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,14 +71,13 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
   }
 }
 
-template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
-  assert(verify_ref(ref), "sanity");
-  _refs->push(ref);
+inline void G1ParScanThreadState::push_on_queue(ScannerTask task) {
+  verify_task(task);
+  _task_queue->push(task);
 }
 
-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
+inline void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
+  oop from_obj = task.to_source_array();
 
   assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
   assert(from_obj->is_objArray(), "must be obj array");
@@ -105,8 +104,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
     to_obj_array->set_length(end);
     // Push the remainder before we process the range in case another
     // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
+    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
   } else {
     assert(length == end, "sanity");
     // We'll process the final range for this object. Restore the length
@@ -127,35 +125,23 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
   to_obj_array->oop_iterate_range(&_scanner, start, end);
 }
 
-inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    do_oop_evac(ref_to_scan);
+inline void G1ParScanThreadState::dispatch_task(ScannerTask task) {
+  verify_task(task);
+  if (task.is_narrow_oop_ptr()) {
+    do_oop_evac(task.to_narrow_oop_ptr());
+  } else if (task.is_oop_ptr()) {
+    do_oop_evac(task.to_oop_ptr());
   } else {
-    do_oop_partial_array(ref_to_scan);
+    do_partial_array(task.to_partial_array_task());
   }
 }
 
-inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
-  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
-  do_oop_evac(ref_to_scan);
-}
-
-inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
-  }
-}
-
-void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
-  StarTask stolen_task;
+void G1ParScanThreadState::steal_and_trim_queue(ScannerTasksQueueSet *task_queues) {
+  ScannerTask stolen_task;
   while (task_queues->steal(_worker_id, stolen_task)) {
-    assert(verify_task(stolen_task), "sanity");
-    dispatch_reference(stolen_task);
+    dispatch_task(stolen_task);
 
-    // We've just processed a reference and we might have made
+    // We've just processed a task and we might have made
     // available new entries on the queues. So we have to make sure
     // we drain the queues as necessary.
     trim_queue();
@@ -163,24 +149,26 @@ void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues)
 }
 
 inline bool G1ParScanThreadState::needs_partial_trimming() const {
-  return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
+  return !_task_queue->overflow_empty() ||
+         (_task_queue->size() > _stack_trim_upper_threshold);
 }
 
 inline bool G1ParScanThreadState::is_partially_trimmed() const {
-  return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
+  return _task_queue->overflow_empty() &&
+         (_task_queue->size() <= _stack_trim_lower_threshold);
 }
 
 inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
-  StarTask ref;
+  ScannerTask task;
   // Drain the overflow stack first, so other threads can potentially steal.
-  while (_refs->pop_overflow(ref)) {
-    if (!_refs->try_push_to_taskqueue(ref)) {
-      dispatch_reference(ref);
+  while (_task_queue->pop_overflow(task)) {
+    if (!_task_queue->try_push_to_taskqueue(task)) {
+      dispatch_task(task);
     }
   }
 
-  while (_refs->pop_local(ref, threshold)) {
-    dispatch_reference(ref);
+  while (_task_queue->pop_local(task, threshold)) {
+    dispatch_task(task);
   }
 }
 
@@ -220,7 +208,7 @@ inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p)
   assert(index < _num_optional_regions,
          "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions);
   _oops_into_optional_regions[index].push_oop(p);
-  DEBUG_ONLY(verify_ref(p);)
+  verify_task(p);
 }
 
 G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) {
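Aside: the do_partial_array() hunk above keeps the existing strategy for large object arrays: before scanning a chunk, it pushes a task for the remainder back on the queue so an idle worker can steal it. The following is a standalone sketch of that split-and-requeue pattern only, with hypothetical types and a plain std::deque standing in for the real ScannerTasksQueue and objArrayOop machinery.

// Sketch of "push the remainder, then process one chunk" work splitting
// (hypothetical names; not the HotSpot implementation).
#include <algorithm>
#include <cstdio>
#include <deque>

struct PartialArrayTask {
  const int* array;   // source array being scanned
  int start;          // next index to process
  int length;         // total number of elements
};

static const int ChunkSize = 4;

static void process_partial_array(std::deque<PartialArrayTask>& queue, PartialArrayTask task) {
  int end = std::min(task.start + ChunkSize, task.length);
  if (end < task.length) {
    // Push the remainder *before* scanning this chunk, so another worker
    // that has run out of work could steal it while we are busy.
    queue.push_back({task.array, end, task.length});
  }
  for (int i = task.start; i < end; i++) {
    std::printf("scan element %d = %d\n", i, task.array[i]);
  }
}

int main() {
  int data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  std::deque<PartialArrayTask> queue;
  queue.push_back({data, 0, 10});
  while (!queue.empty()) {
    PartialArrayTask t = queue.front();
    queue.pop_front();
    process_partial_array(queue, t);
  }
  return 0;
}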
@@ -564,4 +564,88 @@ private:
   int _index;
 };
 
+// Wrapper over an oop that is a partially scanned array.
+// Can be converted to a ScannerTask for placement in associated task queues.
+// Refers to the partially copied source array oop.
+class PartialArrayScanTask {
+  oop _src;
+
+public:
+  PartialArrayScanTask() : _src() {}
+  explicit PartialArrayScanTask(oop src_array) : _src(src_array) {}
+  // Trivially copyable.
+
+  oop to_source_array() const { return _src; }
+};
+
+// Discriminated union over oop*, narrowOop*, and PartialArrayScanTask.
+// Uses a low tag in the associated pointer to identify the category.
+// Used as a task queue element type.
+class ScannerTask {
+  void* _p;
+
+  static const uintptr_t OopTag = 0;
+  static const uintptr_t NarrowOopTag = 1;
+  static const uintptr_t PartialArrayTag = 2;
+  static const uintptr_t TagSize = 2;
+  static const uintptr_t TagAlignment = 1 << TagSize;
+  static const uintptr_t TagMask = TagAlignment - 1;
+
+  static void* encode(void* p, uintptr_t tag) {
+    assert(is_aligned(p, TagAlignment), "misaligned: " PTR_FORMAT, p2i(p));
+    return static_cast<char*>(p) + tag;
+  }
+
+  uintptr_t raw_value() const {
+    return reinterpret_cast<uintptr_t>(_p);
+  }
+
+  bool has_tag(uintptr_t tag) const {
+    return (raw_value() & TagMask) == tag;
+  }
+
+  void* decode(uintptr_t tag) const {
+    assert(has_tag(tag), "precondition");
+    return static_cast<char*>(_p) - tag;
+  }
+
+public:
+  ScannerTask() : _p(NULL) {}
+
+  explicit ScannerTask(oop* p) : _p(encode(p, OopTag)) {}
+
+  explicit ScannerTask(narrowOop* p) : _p(encode(p, NarrowOopTag)) {}
+
+  explicit ScannerTask(PartialArrayScanTask t) :
+    _p(encode(t.to_source_array(), PartialArrayTag)) {}
+
+  // Trivially copyable.
+
+  // Predicate implementations assume OopTag == 0, others are powers of 2.
+
+  bool is_oop_ptr() const {
+    return (raw_value() & (NarrowOopTag | PartialArrayTag)) == 0;
+  }
+
+  bool is_narrow_oop_ptr() const {
+    return (raw_value() & NarrowOopTag) != 0;
+  }
+
+  bool is_partial_array_task() const {
+    return (raw_value() & PartialArrayTag) != 0;
+  }
+
+  oop* to_oop_ptr() const {
+    return static_cast<oop*>(decode(OopTag));
+  }
+
+  narrowOop* to_narrow_oop_ptr() const {
+    return static_cast<narrowOop*>(decode(NarrowOopTag));
+  }
+
+  PartialArrayScanTask to_partial_array_task() const {
+    return PartialArrayScanTask(oop(decode(PartialArrayTag)));
+  }
+};
+
 #endif // SHARE_GC_SHARED_TASKQUEUE_HPP
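As the comment in the ScannerTask class above notes, the predicates rely on OopTag being zero and the other tags being distinct single bits, so exactly one of is_oop_ptr(), is_narrow_oop_ptr(), and is_partial_array_task() holds for any well-formed task. A small standalone check of that tag arithmetic (plain integers stand in for real task pointers; this is an illustration, not HotSpot code):

// Quick check of the tag-predicate arithmetic used by ScannerTask above.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
  const uintptr_t OopTag = 0, NarrowOopTag = 1, PartialArrayTag = 2;
  const uintptr_t base = 0x1000;   // any 4-byte-aligned address

  for (uintptr_t tag : {OopTag, NarrowOopTag, PartialArrayTag}) {
    uintptr_t raw = base + tag;    // what encode() produces
    bool is_oop           = (raw & (NarrowOopTag | PartialArrayTag)) == 0;
    bool is_narrow_oop    = (raw & NarrowOopTag) != 0;
    bool is_partial_array = (raw & PartialArrayTag) != 0;
    // Exactly one predicate holds for each tag, because OopTag is 0 and the
    // other two tags occupy distinct low bits.
    assert((int)is_oop + (int)is_narrow_oop + (int)is_partial_array == 1);
    std::printf("tag=%u -> oop=%d narrow=%d partial=%d\n",
                (unsigned)tag, (int)is_oop, (int)is_narrow_oop, (int)is_partial_array);
  }
  return 0;
}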