Merge
commit b12be07514
@@ -232,36 +232,16 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
-
-  // If not all threads are active, get a draining stack
-  // from the list. Else, just use this threads draining stack.
-  uint which_stack_index;
-  bool use_all_workers = manager->all_workers_active();
-  if (use_all_workers) {
-    which_stack_index = which;
-    assert(manager->active_workers() == ParallelGCThreads,
-           "all_workers_active has been incorrectly set: "
-           " active %d ParallelGCThreads %u", manager->active_workers(),
-           ParallelGCThreads);
-  } else {
-    which_stack_index = ParCompactionManager::pop_recycled_stack_index();
-  }
-
-  cm->set_region_stack_index(which_stack_index);
-  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
-
-  // Has to drain stacks first because there may be regions on
-  // preloaded onto the stack and this thread may never have
-  // done a draining task. Are the draining tasks needed?
+  // Drain the stacks that have been preloaded with regions
+  // that are ready to fill.
 
   cm->drain_region_stacks();
 
+  guarantee(cm->region_stack()->is_empty(), "Not empty");
+
   size_t region_index = 0;
   int random_seed = 17;
 
-  // If we're the termination task, try 10 rounds of stealing before
-  // setting the termination flag
-
   while(true) {
     if (ParCompactionManager::steal(which, &random_seed, region_index)) {
       PSParallelCompact::fill_and_update_region(cm, region_index);
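Note on the loop above: it is the standard drain/steal/terminate idiom. A worker first empties its own preloaded region stack, then repeatedly tries to steal region indexes from the other workers' queues via ParCompactionManager::steal, and gives up only once stealing fails. Below is a minimal, self-contained sketch of the stealing shape in plain C++ — mutex locking instead of HotSpot's lock-free GenericTaskQueue, and all names (WorkerQueues, steal's toy PRNG) invented for illustration:

    #include <cstddef>
    #include <deque>
    #include <mutex>
    #include <vector>

    // Toy stand-in for the per-worker region-index queues that
    // ParCompactionManager registers with a shared task-queue set.
    struct WorkerQueues {
      std::vector<std::deque<size_t>> queues;
      std::vector<std::mutex> locks;
      explicit WorkerQueues(size_t n) : queues(n), locks(n) {}

      // Try to steal one region index from a queue other than 'self'.
      // 'seed' plays the role of random_seed in the real loop.
      bool steal(size_t self, unsigned& seed, size_t& region_index) {
        for (size_t attempt = 0; attempt < queues.size(); ++attempt) {
          seed = seed * 1103515245u + 12345u;   // toy PRNG for victim choice
          size_t victim = seed % queues.size();
          if (victim == self) continue;
          std::lock_guard<std::mutex> g(locks[victim]);
          if (!queues[victim].empty()) {
            region_index = queues[victim].front();  // take from the cold end
            queues[victim].pop_front();
            return true;
          }
        }
        return false;  // nothing available; caller may offer termination
      }
    };

In the real code the give-up path runs through ParallelTaskTerminator::offer_termination(), which re-checks the queues before letting a worker exit.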
@@ -293,42 +273,3 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
                                          _region_index_start,
                                          _region_index_end);
 }
-
-void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
-  ParCompactionManager* cm =
-    ParCompactionManager::gc_thread_compaction_manager(which);
-
-  uint which_stack_index;
-  bool use_all_workers = manager->all_workers_active();
-  if (use_all_workers) {
-    which_stack_index = which;
-    assert(manager->active_workers() == ParallelGCThreads,
-           "all_workers_active has been incorrectly set: "
-           " active %d ParallelGCThreads %u", manager->active_workers(),
-           ParallelGCThreads);
-  } else {
-    which_stack_index = stack_index();
-  }
-
-  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
-  cm->set_region_stack_index(which_stack_index);
-
-  // Process any regions already in the compaction managers stacks.
-  cm->drain_region_stacks();
-
-  assert(cm->region_stack()->is_empty(), "Not empty");
-
-  if (!use_all_workers) {
-    // Always give up the region stack.
-    assert(cm->region_stack() ==
-           ParCompactionManager::region_list(cm->region_stack_index()),
-           "region_stack and region_stack_index are inconsistent");
-    ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
-
-    cm->set_region_stack(NULL);
-    cm->set_region_stack_index((uint)max_uintx);
-  }
-}
@@ -234,27 +234,4 @@ class UpdateDensePrefixTask : public GCTask {
 
   virtual void do_it(GCTaskManager* manager, uint which);
 };
-
-//
-// DrainStacksCompactionTask
-//
-// This task processes regions that have been added to the stacks of each
-// compaction manager.
-//
-// Trying to use one draining thread does not work because there are no
-// guarantees about which task will be picked up by which thread. For example,
-// if thread A gets all the preloaded regions, thread A may not get a draining
-// task (they may all be done by other threads).
-//
-
-class DrainStacksCompactionTask : public GCTask {
-  uint _stack_index;
-  uint stack_index() { return _stack_index; }
- public:
-  DrainStacksCompactionTask(uint stack_index) : GCTask(),
-    _stack_index(stack_index) {};
-  char* name() { return (char *)"drain-region-task"; }
-  virtual void do_it(GCTaskManager* manager, uint which);
-};
 
 #endif // SHARE_VM_GC_PARALLEL_PCTASKS_HPP
@@ -43,8 +43,6 @@
 PSOldGen* ParCompactionManager::_old_gen = NULL;
 ParCompactionManager** ParCompactionManager::_manager_array = NULL;
 
-RegionTaskQueue** ParCompactionManager::_region_list = NULL;
-
 OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
 ParCompactionManager::ObjArrayTaskQueueSet*
   ParCompactionManager::_objarray_queues = NULL;
@@ -52,14 +50,8 @@ ObjectStartArray* ParCompactionManager::_start_array = NULL;
 
 ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
 
-uint* ParCompactionManager::_recycled_stack_index = NULL;
-int ParCompactionManager::_recycled_top = -1;
-int ParCompactionManager::_recycled_bottom = -1;
-
 ParCompactionManager::ParCompactionManager() :
-    _action(CopyAndUpdate),
-    _region_stack(NULL),
-    _region_stack_index((uint)max_uintx) {
+    _action(CopyAndUpdate) {
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -68,14 +60,11 @@ ParCompactionManager::ParCompactionManager() :
 
   marking_stack()->initialize();
   _objarray_stack.initialize();
+  _region_stack.initialize();
 
   reset_bitmap_query_cache();
 }
 
-ParCompactionManager::~ParCompactionManager() {
-  delete _recycled_stack_index;
-}
-
 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   assert(PSParallelCompact::gc_task_manager() != NULL,
     "Needed for initialization");
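Note: the constructor and destructor hunks above capture an ownership change. Previously each manager borrowed a RegionTaskQueue from the global _region_list and tracked which one via _region_stack_index; now the queue is a by-value member initialized with the other stacks, so the index bookkeeping and the destructor's cleanup disappear. A reduced sketch of the two shapes (hypothetical RegionQueue stands in for RegionTaskQueue):

    struct RegionQueue {
      void initialize() { /* allocate backing storage, etc. */ }
    };

    // Before: borrowed queue; identity tracked by index, and NULL/sentinel
    // states needed between GC phases.
    struct ManagerBefore {
      RegionQueue* _region_stack;       // points into a shared _region_list
      unsigned     _region_stack_index; // which list slot is borrowed
    };

    // After: owned queue; no index bookkeeping and nothing to free.
    struct ManagerAfter {
      RegionQueue _region_stack;
      ManagerAfter() { _region_stack.initialize(); }
    };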
@@ -88,19 +77,6 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
   guarantee(_manager_array != NULL, "Could not allocate manager_array");
 
-  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
-                                  parallel_gc_threads+1, mtGC);
-  guarantee(_region_list != NULL, "Could not initialize promotion manager");
-
-  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);
-
-  // parallel_gc-threads + 1 to be consistent with the number of
-  // compaction managers.
-  for(uint i=0; i<parallel_gc_threads + 1; i++) {
-    _region_list[i] = new RegionTaskQueue();
-    region_list(i)->initialize();
-  }
-
   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
   guarantee(_stack_array != NULL, "Could not allocate stack_array");
   _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
@@ -114,7 +90,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
     _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
-    region_array()->register_queue(i, region_list(i));
+    region_array()->register_queue(i, _manager_array[i]->region_stack());
   }
 
   // The VMThread gets its own ParCompactionManager, which is not available
@@ -133,29 +109,6 @@ void ParCompactionManager::reset_all_bitmap_query_caches() {
   }
 }
 
-int ParCompactionManager::pop_recycled_stack_index() {
-  assert(_recycled_bottom <= _recycled_top, "list is empty");
-  // Get the next available index
-  if (_recycled_bottom < _recycled_top) {
-    uint cur, next, last;
-    do {
-      cur = _recycled_bottom;
-      next = cur + 1;
-      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
-    } while (cur != last);
-    return _recycled_stack_index[next];
-  } else {
-    return -1;
-  }
-}
-
-void ParCompactionManager::push_recycled_stack_index(uint v) {
-  // Get the next available index
-  int cur = Atomic::add(1, &_recycled_top);
-  _recycled_stack_index[cur] = v;
-  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
-}
-
 bool ParCompactionManager::should_update() {
   assert(action() != NotValid, "Action is not set");
   return (action() == ParCompactionManager::Update) ||
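The deleted pop_recycled_stack_index claims the next slot with a compare-and-swap loop on _recycled_bottom, while push reserves a slot with an atomic increment of _recycled_top. A standalone rendering of the same claim-by-CAS pattern using std::atomic in place of HotSpot's Atomic::cmpxchg/Atomic::add (the fixed capacity and the g_* names are assumptions of the sketch):

    #include <atomic>

    static unsigned g_recycled[64];        // assumed capacity; the original
                                           // sized this from parallel_gc_threads
    static std::atomic<int> g_top{-1};     // index of last pushed entry
    static std::atomic<int> g_bottom{-1};  // index of last popped entry

    void push_recycled(unsigned v) {
      int cur = g_top.fetch_add(1) + 1;    // reserve the next slot
      g_recycled[cur] = v;
    }

    int pop_recycled() {
      for (;;) {
        int cur = g_bottom.load();
        if (cur >= g_top.load()) {
          return -1;                       // empty: no recycled index available
        }
        // Advance bottom from cur to cur+1; the CAS winner owns slot cur+1.
        if (g_bottom.compare_exchange_weak(cur, cur + 1)) {
          return (int)g_recycled[cur + 1];
        }
      }
    }

As in the original, a pop is only correct once the matching push has completed its store; the code relies on GC-phase structure for that ordering rather than on a full MPMC queue.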
@@ -170,15 +123,6 @@ bool ParCompactionManager::should_copy() {
          (action() == ParCompactionManager::UpdateAndCopy);
 }
 
-void ParCompactionManager::region_list_push(uint list_index,
-                                            size_t region_index) {
-  region_list(list_index)->push(region_index);
-}
-
-void ParCompactionManager::verify_region_list_empty(uint list_index) {
-  assert(region_list(list_index)->is_empty(), "Not empty");
-}
-
 ParCompactionManager*
 ParCompactionManager::gc_thread_compaction_manager(uint index) {
   assert(index < ParallelGCThreads, "index out of range");
@@ -79,31 +79,7 @@ private:
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions? For now just create a different
   // type of TaskQueue.
-  RegionTaskQueue* _region_stack;
-
-  static RegionTaskQueue** _region_list;
-  // Index in _region_list for current _region_stack.
-  uint _region_stack_index;
-
-  // Indexes of recycled region stacks/overflow stacks
-  // Stacks of regions to be compacted are embedded in the tasks doing
-  // the compaction. A thread that executes the task extracts the
-  // region stack and drains it. These threads keep these region
-  // stacks for use during compaction task stealing. If a thread
-  // gets a second draining task, it pushed its current region stack
-  // index into the array _recycled_stack_index and gets a new
-  // region stack from the task. A thread that is executing a
-  // compaction stealing task without ever having executing a
-  // draining task, will get a region stack from _recycled_stack_index.
-  //
-  // Array of indexes into the array of region stacks.
-  static uint* _recycled_stack_index;
-  // The index into _recycled_stack_index of the last region stack index
-  // pushed. If -1, there are no entries into _recycled_stack_index.
-  static int _recycled_top;
-  // The index into _recycled_stack_index of the last region stack index
-  // popped. If -1, there has not been any entry popped.
-  static int _recycled_bottom;
+  RegionTaskQueue _region_stack;
 
   static ParMarkBitMap* _mark_bitmap;
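The deleted comment block describes a hand-off protocol worth spelling out: region stacks lived in a global list, a draining task carried the index of the stack its worker should adopt, and a worker that gave its stack up parked the index in _recycled_stack_index so a steal-only worker could later adopt a stack it never drained. A self-contained sketch of that lifecycle (all helper names invented; the real pool was lock-free, this sketch is deliberately single-threaded to show only the lifecycle):

    #include <vector>

    // Invented miniature of the removed protocol.
    struct RecycledPool {
      std::vector<unsigned> indexes;
      void push(unsigned v) { indexes.push_back(v); }
      int pop() {
        if (indexes.empty()) return -1;
        unsigned v = indexes.back();
        indexes.pop_back();
        return (int)v;
      }
    };

    struct Worker {
      int stack_index = -1;  // -1 plays the role of (uint)max_uintx "no stack"
    };

    // A draining task adopts the stack named by the task; if not all workers
    // are active it recycles the index afterwards for steal-only workers.
    void run_draining_task(Worker& w, unsigned task_index,
                           bool all_workers_active, RecycledPool& pool) {
      w.stack_index = (int)task_index;
      // ... drain the adopted region stack here ...
      if (!all_workers_active) {
        pool.push((unsigned)w.stack_index);
        w.stack_index = -1;
      }
    }

    // A stealing task either reuses its own slot (all workers active) or
    // adopts a recycled stack it never drained.
    void run_stealing_task(Worker& w, unsigned which,
                           bool all_workers_active, RecycledPool& pool) {
      w.stack_index = all_workers_active ? (int)which : pool.pop();
      // ... drain the adopted stack, then steal from other workers ...
    }

With the region stack embedded in each manager (the `+` line above), this entire hand-off disappears.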
@@ -151,32 +127,15 @@ private:
 
   static void reset_all_bitmap_query_caches();
 
-  RegionTaskQueue* region_stack() { return _region_stack; }
-  void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }
+  RegionTaskQueue* region_stack() { return &_region_stack; }
 
   inline static ParCompactionManager* manager_array(uint index);
 
-  inline static RegionTaskQueue* region_list(int index) {
-    return _region_list[index];
-  }
-
-  uint region_stack_index() { return _region_stack_index; }
-  void set_region_stack_index(uint v) { _region_stack_index = v; }
-
-  // Pop and push unique reusable stack index
-  static int pop_recycled_stack_index();
-  static void push_recycled_stack_index(uint v);
-  static void reset_recycled_stack_index() {
-    _recycled_bottom = _recycled_top = -1;
-  }
-
   ParCompactionManager();
-  ~ParCompactionManager();
 
-  // Pushes onto the region stack at the given index. If the
-  // region stack is full,
-  // pushes onto the region overflow stack.
-  static void region_list_push(uint stack_index, size_t region_index);
-  static void verify_region_list_empty(uint stack_index);
   ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
@@ -1929,7 +1929,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     ParCompactionManager* const cm =
       ParCompactionManager::manager_array(int(i));
     assert(cm->marking_stack()->is_empty(), "should be empty");
-    assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
+    assert(cm->region_stack()->is_empty(), "should be empty");
   }
 #endif // ASSERT
@@ -2238,7 +2238,7 @@ public:
   }
 };
 
-void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
+void PSParallelCompact::prepare_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
@@ -2246,29 +2246,12 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
   // Find the threads that are active
   unsigned int which = 0;
 
-  const uint task_count = MAX2(parallel_gc_threads, 1U);
-  for (uint j = 0; j < task_count; j++) {
-    q->enqueue(new DrainStacksCompactionTask(j));
-    ParCompactionManager::verify_region_list_empty(j);
-    // Set the region stacks variables to "no" region stack values
-    // so that they will be recognized and needing a region stack
-    // in the stealing tasks if they do not get one by executing
-    // a draining stack.
-    ParCompactionManager* cm = ParCompactionManager::manager_array(j);
-    cm->set_region_stack(NULL);
-    cm->set_region_stack_index((uint)max_uintx);
-  }
-  ParCompactionManager::reset_recycled_stack_index();
-
   // Find all regions that are available (can be filled immediately) and
   // distribute them to the thread stacks. The iteration is done in reverse
   // order (high to low) so the regions will be removed in ascending order.
 
   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 
-  // A region index which corresponds to the tasks created above.
-  // "which" must be 0 <= which < task_count
-
   which = 0;
   // id + 1 is used to test termination so unsigned can
   // be used with an old_space_id == 0.
@@ -2284,12 +2267,11 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
 
     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
       if (sd.region(cur)->claim_unsafe()) {
-        ParCompactionManager::region_list_push(which, cur);
+        ParCompactionManager* cm = ParCompactionManager::manager_array(which);
+        cm->region_stack()->push(cur);
         region_logger.handle(cur);
         // Assign regions to tasks in round-robin fashion.
-        if (++which == task_count) {
-          assert(which <= parallel_gc_threads,
-                 "Inconsistent number of workers");
+        if (++which == parallel_gc_threads) {
           which = 0;
         }
       }
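Two details of the distribution loop are easy to miss: the condition cur + 1 > beg_region avoids unsigned wrap-around when beg_region is 0, and walking regions from high to low means each worker's LIFO stack later pops them in ascending order. A standalone sketch of the dealing pattern (names invented):

    #include <cstddef>
    #include <vector>

    // Deal region indexes [beg, end) to n workers round-robin, high to low,
    // so LIFO pops yield ascending region numbers per worker.
    std::vector<std::vector<size_t>> deal_regions(size_t beg, size_t end, size_t n) {
      std::vector<std::vector<size_t>> stacks(n);
      size_t which = 0;
      for (size_t cur = end - 1; cur + 1 > beg; --cur) {  // same wrap-safe test
        stacks[which].push_back(cur);  // push_back stands in for a stack push
        if (++which == n) {
          which = 0;
        }
      }
      return stacks;
    }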
@@ -2448,7 +2430,7 @@ void PSParallelCompact::compact() {
   ParallelTaskTerminator terminator(active_gc_threads, qset);
 
   GCTaskQueue* q = GCTaskQueue::create();
-  enqueue_region_draining_tasks(q, active_gc_threads);
+  prepare_region_draining_tasks(q, active_gc_threads);
   enqueue_dense_prefix_tasks(q, active_gc_threads);
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
@@ -1079,7 +1079,7 @@ class PSParallelCompact : AllStatic {
   static void compact();
 
   // Add available regions to the stack and draining tasks to the task queue.
-  static void enqueue_region_draining_tasks(GCTaskQueue* q,
+  static void prepare_region_draining_tasks(GCTaskQueue* q,
                                             uint parallel_gc_threads);
 
   // Add dense prefix update tasks to the task queue.