4396719: Mark Sweep stack overflow on deeply nested Object arrays

Use an explicit stack for object arrays and process them in chunks.
Reviewed-by: iveresov, apetrusenko

parent 5fdec09798
commit 4f82340476
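The summary above is the whole design: instead of recursing element-by-element through nested object arrays, marking now pushes explicit (array, index) continuation tasks onto a stack and scans a bounded stride per task, so native stack depth no longer grows with nesting depth. A self-contained sketch of the idea — all types here are illustrative stand-ins, with kStride standing in for the ObjArrayMarkingStride flag, not HotSpot code:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy object graph: every object is an "array" of references.
    struct Obj {
      bool marked = false;
      std::vector<Obj*> elems;
    };

    // Continuation task: an array plus the index at which to resume scanning.
    struct ObjArrayTask {
      Obj*   obj;
      size_t index;
    };

    static const size_t kStride = 512;  // cf. the ObjArrayMarkingStride flag

    // Mark everything reachable from root using an explicit task stack;
    // native stack usage stays O(1) regardless of array size or nesting.
    void mark_all(Obj* root) {
      std::vector<ObjArrayTask> stack;
      root->marked = true;
      stack.push_back(ObjArrayTask{root, 0});
      while (!stack.empty()) {
        ObjArrayTask t = stack.back();
        stack.pop_back();
        const size_t len = t.obj->elems.size();
        const size_t end = std::min(len, t.index + kStride);
        if (end < len) {
          stack.push_back(ObjArrayTask{t.obj, end});  // continuation chunk
        }
        for (size_t i = t.index; i < end; ++i) {
          Obj* e = t.obj->elems[i];
          if (e != nullptr && !e->marked) {
            e->marked = true;
            stack.push_back(ObjArrayTask{e, 0});
          }
        }
      }
    }

    int main() {
      // A 100000-deep chain of one-element arrays: recursive marking would
      // overflow the native stack; the explicit stack handles it easily.
      std::vector<Obj> nodes(100000);
      for (size_t i = 0; i + 1 < nodes.size(); ++i) {
        nodes[i].elems.push_back(&nodes[i + 1]);
      }
      mark_all(&nodes[0]);
      std::printf("deepest node marked: %s\n", nodes.back().marked ? "yes" : "no");
      return 0;
    }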
@@ -24,8 +24,8 @@
 class G1CollectedHeap;
 class CMTask;
-typedef GenericTaskQueue<oop> CMTaskQueue;
-typedef GenericTaskQueueSet<oop> CMTaskQueueSet;
+typedef GenericTaskQueue<oop> CMTaskQueue;
+typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;

 // A generic CM bit map.  This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
@@ -56,8 +56,8 @@ class ConcurrentZFThread;
 # define IF_G1_DETAILED_STATS(code)
 #endif

-typedef GenericTaskQueue<StarTask> RefToScanQueue;
-typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
+typedef GenericTaskQueue<StarTask> RefToScanQueue;
+typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

 typedef int RegionIdx_t;  // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;    // needs to hold [ 0..CardsPerRegion )
@@ -101,6 +101,8 @@ void G1MarkSweep::allocate_stacks() {

   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  GenMarkSweep::_objarray_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

   int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
@@ -175,6 +175,7 @@ psAdaptiveSizePolicy.hpp  gcUtil.hpp
 psAdaptiveSizePolicy.hpp        adaptiveSizePolicy.hpp

 psCompactionManager.cpp         gcTaskManager.hpp
+psCompactionManager.cpp         objArrayKlass.inline.hpp
 psCompactionManager.cpp         objectStartArray.hpp
 psCompactionManager.cpp         oop.hpp
 psCompactionManager.cpp         oop.inline.hpp
@@ -189,6 +190,9 @@ psCompactionManager.cpp  systemDictionary.hpp
 psCompactionManager.hpp         allocation.hpp
 psCompactionManager.hpp         taskqueue.hpp

+psCompactionManager.inline.hpp  psCompactionManager.hpp
+psCompactionManager.inline.hpp  psParallelCompact.hpp
+
 psGCAdaptivePolicyCounters.hpp  gcAdaptivePolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp  gcPolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp  psAdaptiveSizePolicy.hpp
@@ -379,12 +383,12 @@ pcTasks.cpp  fprofiler.hpp
 pcTasks.cpp                     jniHandles.hpp
 pcTasks.cpp                     jvmtiExport.hpp
 pcTasks.cpp                     management.hpp
+pcTasks.cpp                     objArrayKlass.inline.hpp
 pcTasks.cpp                     psParallelCompact.hpp
 pcTasks.cpp                     pcTasks.hpp
 pcTasks.cpp                     oop.inline.hpp
 pcTasks.cpp                     oop.pcgc.inline.hpp
 pcTasks.cpp                     systemDictionary.hpp
 pcTasks.cpp                     taskqueue.hpp
 pcTasks.cpp                     thread.hpp
 pcTasks.cpp                     universe.hpp
 pcTasks.cpp                     vmThread.hpp
@@ -48,7 +48,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
   _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);

   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
 }

@@ -118,7 +118,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   }

   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
   // cm->deallocate_stacks();
 }
@@ -196,17 +196,19 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

   oop obj = NULL;
+  ObjArrayTask task;
   int random_seed = 17;
-  while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, obj)) {
-      obj->follow_contents(cm);
-      cm->drain_marking_stacks(&mark_and_push_closure);
-    } else {
-      if (terminator()->offer_termination()) {
-        break;
-      }
+  do {
+    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(cm, task.obj(), task.index());
+      cm->follow_marking_stacks();
     }
-  }
+    while (ParCompactionManager::steal(which, &random_seed, obj)) {
+      obj->follow_contents(cm);
+      cm->follow_marking_stacks();
+    }
+  } while (!terminator()->offer_termination());
 }

 //
@@ -28,6 +28,8 @@
 PSOldGen*               ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
+ParCompactionManager::ObjArrayTaskQueueSet*
+  ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*       ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;
@@ -46,6 +48,11 @@ ParCompactionManager::ParCompactionManager() :

   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
+
+  _objarray_queue.initialize();
+  _objarray_overflow_stack =
+    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
+
 #ifdef USE_RegionTaskQueueWithOverflow
   region_stack()->initialize();
 #else
@@ -69,6 +76,7 @@ ParCompactionManager::ParCompactionManager() :

 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
+  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
@@ -86,18 +94,21 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {

   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+  guarantee(_manager_array != NULL, "Could not allocate manager_array");

   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_stack_array != NULL, "Count not initialize promotion manager");
+  guarantee(_stack_array != NULL, "Could not allocate stack_array");
+  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
+  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
   _region_array = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_array != NULL, "Count not initialize promotion manager");
+  guarantee(_region_array != NULL, "Could not allocate region_array");

   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
 #ifdef USE_RegionTaskQueueWithOverflow
     region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
@@ -203,36 +214,30 @@ void ParCompactionManager::reset() {
   }
 }

-void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
+void ParCompactionManager::follow_marking_stacks() {
   do {
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!overflow_stack()->is_empty()) {
-      oop obj = overflow_stack()->pop();
-      obj->follow_contents(this);
+    // Drain the overflow stack first, to allow stealing from the marking stack.
+    while (!overflow_stack()->is_empty()) {
+      overflow_stack()->pop()->follow_contents(this);
     }

     oop obj;
     // obj is a reference!!!
     while (marking_stack()->pop_local(obj)) {
       // It would be nice to assert about the type of objects we might
       // pop, but they can come from anywhere, unfortunately.
       obj->follow_contents(this);
     }
-  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

-  assert(marking_stack()->size() == 0, "Sanity");
-  assert(overflow_stack()->length() == 0, "Sanity");
+    ObjArrayTask task;
+    while (!_objarray_overflow_stack->is_empty()) {
+      task = _objarray_overflow_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    }
+    while (_objarray_queue.pop_local(task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    }
+  } while (!marking_stacks_empty());

+  assert(marking_stacks_empty(), "Sanity");
 }

 void ParCompactionManager::drain_region_overflow_stack() {
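A note on the loop shape in follow_marking_stacks: following an oop can enqueue ObjArrayTask chunks, and following a chunk can enqueue newly marked oops, so one pass over either kind of stack is not enough; the do/while repeats both phases until a complete pass leaves every stack empty. The same invariant in miniature (an illustrative toy, not patch code — ints stand in for oops and chunks):

    #include <vector>

    // Each phase can feed the other, so drain until a full pass stays empty.
    void drain(std::vector<int>& oops, std::vector<int>& chunks) {
      do {
        while (!oops.empty()) {
          int v = oops.back(); oops.pop_back();
          if (v > 0) chunks.push_back(v - 1);  // an oop may yield chunks
        }
        while (!chunks.empty()) {
          int v = chunks.back(); chunks.pop_back();
          if (v > 0) oops.push_back(v - 1);    // a chunk may yield oops
        }
      } while (!oops.empty() || !chunks.empty());
    }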
@@ -22,18 +22,6 @@
  *
  */

-//
-// psPromotionManager is used by a single thread to manage object survival
-// during a scavenge. The promotion manager contains thread local data only.
-//
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
-// to use a promotion manager in more than one thread, the stacks MUST be
-// on cheap. This can lead to memory leaks, though, as they are not auto
-// deallocated.
-//
-// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
-//
-
 // Move to some global location
 #define HAS_BEEN_MOVED 0x1501d01d
 // End move to some global location
@@ -46,8 +34,6 @@ class ObjectStartArray;
 class ParallelCompactData;
 class ParMarkBitMap;

-// Move to it's own file if this works out.
-
 class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
@@ -72,14 +58,27 @@ class ParCompactionManager : public CHeapObj {
  // ------------------------  End don't putback if not needed

  private:
+  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
+  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
+  #undef OBJARRAY_QUEUE_SIZE
+
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
+  static ObjArrayTaskQueueSet*  _objarray_queues;
   static ObjectStartArray*      _start_array;
   static RegionTaskQueueSet*    _region_array;
   static PSOldGen*              _old_gen;

  private:
   OopTaskQueue                  _marking_stack;
   GrowableArray<oop>*           _overflow_stack;
+
+  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
+  ObjArrayTaskQueue             _objarray_queue;
+  ObjArrayOverflowStack*        _objarray_overflow_stack;

   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
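The size comment on OBJARRAY_QUEUE_SIZE can be checked by hand: 1<<12 is 4K entries and 1<<13 is 8K entries, and an ObjArrayTask is an oop plus an int — 8 bytes on 32-bit and, with alignment padding, 16 bytes on 64-bit (my reading of the comment, not a measurement):

    #include <cstdio>

    // Verify the claimed footprints of the obj-array task queue.
    int main() {
      const unsigned entries32 = 1u << 12, entries64 = 1u << 13;
      std::printf("32-bit: %u entries * 8 bytes  = %u KiB\n",
                  entries32, entries32 * 8 / 1024);   // 4096 * 8  = 32 KiB
      std::printf("64-bit: %u entries * 16 bytes = %u KiB\n",
                  entries64, entries64 * 16 / 1024);  // 8192 * 16 = 128 KiB
      return 0;
    }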
@@ -128,8 +127,8 @@ class ParCompactionManager : public CHeapObj {
   // Pushes onto the region stack.  If the region stack is full,
   // pushes onto the region overflow stack.
   void region_stack_push(size_t region_index);
- public:

+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }

@@ -163,6 +162,8 @@ class ParCompactionManager : public CHeapObj {
   // Get a oop for scanning.  If returns null, no oop were found.
   oop retrieve_for_scanning();

+  inline void push_objarray(oop obj, size_t index);
+
   // Save region for later processing.  Must not fail.
   void save_for_processing(size_t region_index);
   // Get a region for processing.  If returns null, no region were found.
@@ -175,12 +176,17 @@ class ParCompactionManager : public CHeapObj {
     return stack_array()->steal(queue_num, seed, t);
   }

+  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
+    return _objarray_queues->steal(queue_num, seed, t);
+  }
+
   static bool steal(int queue_num, int* seed, RegionTask& t) {
     return region_array()->steal(queue_num, seed, t);
   }

-  // Process tasks remaining on any stack
-  void drain_marking_stacks(OopClosure *blk);
+  // Process tasks remaining on any marking stack
+  void follow_marking_stacks();
+  inline bool marking_stacks_empty() const;

   // Process tasks remaining on any stack
   void drain_region_stacks();
@@ -200,3 +206,8 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
          "out of range manager_array access");
   return _manager_array[index];
 }
+
+bool ParCompactionManager::marking_stacks_empty() const {
+  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
+    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+}
psCompactionManager.inline.hpp (new file, 32 lines)
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+void ParCompactionManager::push_objarray(oop obj, size_t index)
+{
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  if (!_objarray_queue.push(task)) {
+    _objarray_overflow_stack->push(task);
+  }
+}
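push_objarray above never fails: it first tries the bounded lock-free queue that other GC workers can steal from, and only on overflow falls back to the unbounded, owner-private GrowableArray. The same pattern reduced to its essentials (a generic sketch under assumed types, not the HotSpot classes):

    #include <cstddef>
    #include <vector>

    // Bounded queue with a failing push (stand-in for GenericTaskQueue).
    template <typename T, size_t N>
    class BoundedQueue {
      T      _buf[N];
      size_t _size = 0;
     public:
      bool push(const T& t) {
        if (_size == N) return false;  // full: caller handles overflow
        _buf[_size++] = t;
        return true;
      }
    };

    // Infallible push: fast, stealable path first; growable backup second.
    template <typename T, size_t N>
    void push_with_overflow(BoundedQueue<T, N>& queue,
                            std::vector<T>& overflow, const T& t) {
      if (!queue.push(t)) {
        overflow.push_back(t);  // never fails (may allocate)
      }
    }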
@@ -479,6 +479,7 @@ void PSMarkSweep::allocate_stacks() {
   _preserved_oop_stack = NULL;

   _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@@ -497,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
   }

   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
 }
@@ -785,7 +785,7 @@ PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closu
 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

-void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
@@ -2376,7 +2376,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   // Follow code cache roots.
   CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                           purged_class);
-  follow_stack(cm); // Flush marking stack.
+  cm->follow_marking_stacks(); // Flush marking stack.

   // Update subklass/sibling/implementor links of live klasses
   // revisit_klass_stack is used in follow_weak_klass_links().
@@ -2389,8 +2389,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());

-  assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
-  assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
+  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }

 // This should be moved to the shared markSweep code!
@@ -2709,22 +2708,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
   young_gen->move_and_update(cm);
 }

-
-void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
-  while(!cm->overflow_stack()->is_empty()) {
-    oop obj = cm->overflow_stack()->pop();
-    obj->follow_contents(cm);
-  }
-
-  oop obj;
-  // obj is a reference!!!
-  while (cm->marking_stack()->pop_local(obj)) {
-    // It would be nice to assert about the type of objects we might
-    // pop, but they can come from anywhere, unfortunately.
-    obj->follow_contents(cm);
-  }
-}
-
 void
 PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
@@ -2745,7 +2728,7 @@ PSParallelCompact::follow_weak_klass_links() {
                         &keep_alive_closure);
   }
   // revisit_klass_stack is cleared in reset()
-  follow_stack(cm);
+  cm->follow_marking_stacks();
 }
 }

@@ -2776,7 +2759,7 @@ void PSParallelCompact::follow_mdo_weak_refs() {
       rms->at(j)->follow_weak_refs(is_alive_closure());
     }
     // revisit_mdo_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }
@@ -901,7 +901,6 @@ class PSParallelCompact : AllStatic {
   // Mark live objects
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
-  static void follow_stack(ParCompactionManager* cm);
   static void follow_weak_klass_links();
   static void follow_mdo_weak_refs();

@@ -1276,7 +1275,7 @@ inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
       }
     }
   }
-  follow_stack(cm);
+  cm->follow_marking_stacks();
 }

 template <class T>
@@ -25,8 +25,9 @@
 #include "incls/_precompiled.incl"
 #include "incls/_markSweep.cpp.incl"

-GrowableArray<oop>*          MarkSweep::_marking_stack       = NULL;
-GrowableArray<Klass*>*       MarkSweep::_revisit_klass_stack = NULL;
+GrowableArray<oop>*          MarkSweep::_marking_stack       = NULL;
+GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack      = NULL;
+GrowableArray<Klass*>*       MarkSweep::_revisit_klass_stack = NULL;
 GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack   = NULL;

 GrowableArray<oop>*          MarkSweep::_preserved_oop_stack = NULL;
@@ -104,11 +105,18 @@ void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }

 void MarkSweep::follow_stack() {
-  while (!_marking_stack->is_empty()) {
-    oop obj = _marking_stack->pop();
-    assert (obj->is_gc_marked(), "p must be marked");
-    obj->follow_contents();
-  }
+  do {
+    while (!_marking_stack->is_empty()) {
+      oop obj = _marking_stack->pop();
+      assert (obj->is_gc_marked(), "p must be marked");
+      obj->follow_contents();
+    }
+    while (!_objarray_stack->is_empty()) {
+      ObjArrayTask task = _objarray_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(task.obj(), task.index());
+    }
+  } while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
 }

 MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
@@ -110,8 +110,9 @@ class MarkSweep : AllStatic {
   // Vars
   //
  protected:
-  // Traversal stack used during phase1
+  // Traversal stacks used during phase1
   static GrowableArray<oop>*             _marking_stack;
+  static GrowableArray<ObjArrayTask>*    _objarray_stack;
   // Stack for live klasses to revisit at end of marking phase
   static GrowableArray<Klass*>*          _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase
@@ -188,6 +189,7 @@ class MarkSweep : AllStatic {
   template <class T> static inline void mark_and_follow(T* p);
   // Check mark and maybe push on marking stack
   template <class T> static inline void mark_and_push(T* p);
+  static inline void push_objarray(oop obj, size_t index);

   static void follow_stack();   // Empty marking stack.
@@ -77,6 +77,12 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
   }
 }

+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack->push(task);
+}
+
 template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
@@ -2724,8 +2724,10 @@ markOop.inline.hpp  markOop.hpp

 markSweep.cpp                   compileBroker.hpp
 markSweep.cpp                   methodDataOop.hpp
+markSweep.cpp                   objArrayKlass.inline.hpp

 markSweep.hpp                   collectedHeap.hpp
+markSweep.hpp                   taskqueue.hpp

 memRegion.cpp                   globals.hpp
 memRegion.cpp                   memRegion.hpp
@@ -3054,8 +3056,10 @@ objArrayKlass.cpp  copy.hpp
 objArrayKlass.cpp               genOopClosures.inline.hpp
 objArrayKlass.cpp               handles.inline.hpp
 objArrayKlass.cpp               instanceKlass.hpp
+objArrayKlass.cpp               markSweep.inline.hpp
 objArrayKlass.cpp               mutexLocker.hpp
 objArrayKlass.cpp               objArrayKlass.hpp
+objArrayKlass.cpp               objArrayKlass.inline.hpp
 objArrayKlass.cpp               objArrayKlassKlass.hpp
 objArrayKlass.cpp               objArrayOop.hpp
 objArrayKlass.cpp               oop.inline.hpp
@@ -3066,11 +3070,12 @@ objArrayKlass.cpp  systemDictionary.hpp
 objArrayKlass.cpp               universe.inline.hpp
 objArrayKlass.cpp               vmSymbols.hpp

 objArrayKlass.hpp               arrayKlass.hpp
 objArrayKlass.hpp               instanceKlass.hpp
 objArrayKlass.hpp               specialized_oop_closures.hpp

+objArrayKlass.inline.hpp        objArrayKlass.hpp
+
 objArrayKlassKlass.cpp          collectedHeap.inline.hpp
 objArrayKlassKlass.cpp          instanceKlass.hpp
 objArrayKlassKlass.cpp          javaClasses.hpp
@@ -4096,6 +4101,7 @@ task.cpp  timer.hpp
 task.hpp                        top.hpp

 taskqueue.cpp                   debug.hpp
+taskqueue.cpp                   oop.inline.hpp
 taskqueue.cpp                   os.hpp
 taskqueue.cpp                   taskqueue.hpp
 taskqueue.cpp                   thread_<os_family>.inline.hpp
@@ -115,10 +115,14 @@ objArrayKlass.cpp  heapRegionSeq.inline.hpp
 objArrayKlass.cpp               g1CollectedHeap.inline.hpp
 objArrayKlass.cpp               g1OopClosures.inline.hpp
 objArrayKlass.cpp               oop.pcgc.inline.hpp
+objArrayKlass.cpp               psCompactionManager.hpp
 objArrayKlass.cpp               psPromotionManager.inline.hpp
 objArrayKlass.cpp               psScavenge.inline.hpp
 objArrayKlass.cpp               parOopClosures.inline.hpp

+objArrayKlass.inline.hpp        psCompactionManager.inline.hpp
+objArrayKlass.inline.hpp        psParallelCompact.hpp
+
 oop.pcgc.inline.hpp             parNewGeneration.hpp
 oop.pcgc.inline.hpp             parallelScavengeHeap.hpp
 oop.pcgc.inline.hpp             psCompactionManager.hpp
@@ -159,6 +159,7 @@ void GenMarkSweep::allocate_stacks() {
   _preserved_oop_stack = NULL;

   _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@@ -194,7 +195,6 @@ void GenMarkSweep::allocate_stacks() {
 }

-
 void GenMarkSweep::deallocate_stacks() {

   if (!UseG1GC) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     gch->release_scratch();
@@ -208,6 +208,7 @@ void GenMarkSweep::deallocate_stacks() {
   }

   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
@@ -28,10 +28,10 @@ class CardTableRS;
 class CardTableModRefBS;
 class DefNewGeneration;

-template<class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+template<class E, unsigned int N> class GenericTaskQueue;
+typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue;
+template<class T> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

 // Closure for iterating roots from a particular generation
 // Note: all classes deriving from this MUST call this do_barrier
@@ -314,24 +314,24 @@ void objArrayKlass::initialize(TRAPS) {

 void objArrayKlass::oop_follow_contents(oop obj) {
   assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header();
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    MarkSweep::mark_and_follow(p))
+  objArrayOop(obj)->follow_header();
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(obj, 0);
+  } else {
+    objarray_follow_contents<oop>(obj, 0);
+  }
 }

 #ifndef SERIALGC
 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  objArrayOop a = objArrayOop(obj);
-  a->follow_header(cm);
-  ObjArrayKlass_OOP_ITERATE( \
-    a, p, \
-    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
-    PSParallelCompact::mark_and_follow(cm, p))
+  assert(obj->is_array(), "obj must be array");
+  objArrayOop(obj)->follow_header(cm);
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(cm, obj, 0);
+  } else {
+    objarray_follow_contents<oop>(cm, obj, 0);
+  }
 }
 #endif // SERIALGC
@@ -91,10 +91,18 @@ class objArrayKlass : public arrayKlass {

   // Garbage collection
   void oop_follow_contents(oop obj);
+  inline void oop_follow_contents(oop obj, int index);
+  template <class T> inline void objarray_follow_contents(oop obj, int index);
+
   int oop_adjust_pointers(oop obj);

   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS
+#ifndef SERIALGC
+  inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
+  template <class T> inline void
+    objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
+#endif // !SERIALGC

   // Iterators
   int oop_oop_iterate(oop obj, OopClosure* blk) {
@@ -131,5 +139,4 @@ class objArrayKlass : public arrayKlass {
   void oop_verify_on(oop obj, outputStream* st);
   void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
   void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
-
 };
hotspot/src/share/vm/oops/objArrayKlass.inline.hpp (new file, 89 lines)
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+void objArrayKlass::oop_follow_contents(oop obj, int index) {
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(obj, index);
+  } else {
+    objarray_follow_contents<oop>(obj, index);
+  }
+}
+
+template <class T>
+void objArrayKlass::objarray_follow_contents(oop obj, int index) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    MarkSweep::mark_and_push<T>(e);
+  }
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+  }
+}
+
+#ifndef SERIALGC
+void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
+                                        int index) {
+  if (UseCompressedOops) {
+    objarray_follow_contents<narrowOop>(cm, obj, index);
+  } else {
+    objarray_follow_contents<oop>(cm, obj, index);
+  }
+}
+
+template <class T>
+void objArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
+                                             int index) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    PSParallelCompact::mark_and_push<T>(cm, e);
+  }
+
+  if (end_index < len) {
+    cm->push_objarray(a, end_index); // Push the continuation.
+  }
+}
+#endif // #ifndef SERIALGC
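With ObjArrayMarkingStride at its default of 512 (the develop flag added further down in this patch), objarray_follow_contents turns one large array into a short, bounded sequence of tasks, and at most one continuation per array is live at a time. A worked count for a 100,000-element array:

    #include <cstdio>

    // Chunk count for one array under a 512-element marking stride.
    int main() {
      const unsigned long len = 100000, stride = 512;
      const unsigned long tasks = (len + stride - 1) / stride;  // ceiling
      std::printf("%lu elements -> %lu tasks of <= %lu elements\n",
                  len, tasks, stride);  // 100000 -> 196 tasks
      return 0;
    }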
@@ -1346,9 +1346,7 @@ void Arguments::set_g1_gc_flags() {
   }

   if (FLAG_IS_DEFAULT(MarkStackSize)) {
-    // Size as a multiple of TaskQueueSuper::N which is larger
-    // for 64-bit.
-    FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
+    FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
   }
   if (PrintGCDetails && Verbose) {
     tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
@@ -1795,6 +1795,10 @@ class CommandLineFlags {
   product(uintx, PreserveMarkStackSize, 1024,                               \
           "Size for stack used in promotion failure handling")             \
                                                                            \
+  develop(uintx, ObjArrayMarkingStride, 512,                               \
+          "Number of ObjArray elements to push onto the marking stack "   \
+          "before pushing a continuation entry")                           \
+                                                                           \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")          \
                                                                            \
   product_pd(bool, ResizeTLAB,                                             \
@@ -827,6 +827,8 @@ const int badCodeHeapFreeVal = 0xDD;  // value used to zap
 #define       badHeapWord       (::badHeapWordVal)
 #define       badJNIHandle      ((oop)::badJNIHandleVal)

+// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
+#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))

 //----------------------------------------------------------------------------------------------------
 // Utility functions for bitfield manipulations
@@ -31,10 +31,6 @@ uint ParallelTaskTerminator::_total_spins = 0;
 uint ParallelTaskTerminator::_total_peeks = 0;
 #endif

-bool TaskQueueSuper::peek() {
-  return _bottom != _age.top();
-}
-
 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
   const int a =      16807;
   const int m = 2147483647;
@@ -180,6 +176,13 @@ void ParallelTaskTerminator::reset_for_reuse() {
   }
 }

+#ifdef ASSERT
+bool ObjArrayTask::is_valid() const {
+  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
+    _index < objArrayOop(_obj)->length();
+}
+#endif // ASSERT
+
 bool RegionTaskQueueWithOverflow::is_empty() {
   return (_region_queue.size() == 0) &&
          (_overflow_stack->length() == 0);
@@ -22,6 +22,7 @@
  *
  */

+template <unsigned int N>
 class TaskQueueSuper: public CHeapObj {
 protected:
   // Internal type for indexing the queue; also used for the tag.
@@ -30,10 +31,7 @@ protected:
   // The first free element after the last one pushed (mod N).
   volatile uint _bottom;

-  enum {
-    N = 1 << NOT_LP64(14) LP64_ONLY(17), // Queue size: 16K or 128K
-    MOD_N_MASK = N - 1                   // To compute x mod N efficiently.
-  };
+  enum { MOD_N_MASK = N - 1 };

   class Age {
   public:
@@ -84,12 +82,12 @@ protected:

   // Returns a number in the range [0..N).  If the result is "N-1", it should be
   // interpreted as 0.
-  uint dirty_size(uint bot, uint top) {
+  uint dirty_size(uint bot, uint top) const {
     return (bot - top) & MOD_N_MASK;
   }

   // Returns the size corresponding to the given "bot" and "top".
-  uint size(uint bot, uint top) {
+  uint size(uint bot, uint top) const {
     uint sz = dirty_size(bot, top);
     // Has the queue "wrapped", so that bottom is less than top?  There's a
     // complicated special case here.  A pair of threads could perform pop_local
@@ -111,17 +109,17 @@ protected:
 public:
   TaskQueueSuper() : _bottom(0), _age() {}

-  // Return "true" if the TaskQueue contains any tasks.
-  bool peek();
+  // Return true if the TaskQueue contains any tasks.
+  bool peek() { return _bottom != _age.top(); }

   // Return an estimate of the number of elements in the queue.
   // The "careful" version admits the possibility of pop_local/pop_global
   // races.
-  uint size() {
+  uint size() const {
     return size(_bottom, _age.top());
   }

-  uint dirty_size() {
+  uint dirty_size() const {
     return dirty_size(_bottom, _age.top());
   }
@@ -132,19 +130,36 @@ public:

   // Maximum number of elements allowed in the queue.  This is two less
   // than the actual queue size, for somewhat complicated reasons.
-  uint max_elems() { return N - 2; }
+  uint max_elems() const { return N - 2; }

   // Total size of queue.
   static const uint total_size() { return N; }
 };

-template<class E> class GenericTaskQueue: public TaskQueueSuper {
+template<class E, unsigned int N = TASKQUEUE_SIZE>
+class GenericTaskQueue: public TaskQueueSuper<N> {
+protected:
+  typedef typename TaskQueueSuper<N>::Age Age;
+  typedef typename TaskQueueSuper<N>::idx_t idx_t;
+
+  using TaskQueueSuper<N>::_bottom;
+  using TaskQueueSuper<N>::_age;
+  using TaskQueueSuper<N>::increment_index;
+  using TaskQueueSuper<N>::decrement_index;
+  using TaskQueueSuper<N>::dirty_size;
+
+public:
+  using TaskQueueSuper<N>::max_elems;
+  using TaskQueueSuper<N>::size;
+
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
   bool push_slow(E t, uint dirty_n_elems);
   bool pop_local_slow(uint localBot, Age oldAge);

 public:
+  typedef E element_type;
+
   // Initializes the queue to empty.
   GenericTaskQueue();
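Because the new capacity parameter N defaults to TASKQUEUE_SIZE, pre-existing instantiations such as GenericTaskQueue<oop> keep compiling unchanged, while new clients like ObjArrayTaskQueue can request a smaller queue. The shape of that API change, simplified to its essentials (not the real class; kTaskQueueSize stands in for the TASKQUEUE_SIZE macro):

    // Capacity as a compile-time parameter with a default: old call sites
    // still compile, new ones may override.
    static const unsigned int kTaskQueueSize = 1u << 14;

    template <class E, unsigned int N = kTaskQueueSize>
    class TaskQueue {
      E _elems[N];  // the real class heap-allocates; simplified here
    };

    typedef TaskQueue<int>            DefaultSized;  // N = kTaskQueueSize
    typedef TaskQueue<int, 1u << 12>  SmallSized;    // explicit capacity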
@@ -175,19 +190,19 @@ private:
   volatile E* _elems;
 };

-template<class E>
-GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::GenericTaskQueue() {
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }

-template<class E>
-void GenericTaskQueue<E>::initialize() {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::initialize() {
   _elems = NEW_C_HEAP_ARRAY(E, N);
   guarantee(_elems != NULL, "Allocation failed.");
 }

-template<class E>
-void GenericTaskQueue<E>::oops_do(OopClosure* f) {
+template<class E, unsigned int N>
+void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
   // tty->print_cr("START OopTaskQueue::oops_do");
   uint iters = size();
   uint index = _bottom;
@@ -203,21 +218,21 @@ void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
   // tty->print_cr("END OopTaskQueue::oops_do");
 }

-template<class E>
-bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
   if (dirty_n_elems == N - 1) {
     // Actually means 0, so do the push.
     uint localBot = _bottom;
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   }
   return false;
 }

-template<class E>
-bool GenericTaskQueue<E>::
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::
 pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
@@ -249,8 +264,8 @@ pop_local_slow(uint localBot, Age oldAge) {
   return false;
 }

-template<class E>
-bool GenericTaskQueue<E>::pop_global(E& t) {
+template<class E, unsigned int N>
+bool GenericTaskQueue<E, N>::pop_global(E& t) {
   Age oldAge = _age.get();
   uint localBot = _bottom;
   uint n_elems = size(localBot, oldAge.top());
@@ -258,7 +273,7 @@ bool GenericTaskQueue<E, N>::pop_global(E& t) {
     return false;
   }

-  t = _elems[oldAge.top()];
+  const_cast<E&>(t = _elems[oldAge.top()]);
   Age newAge(oldAge);
   newAge.increment();
   Age resAge = _age.cmpxchg(newAge, oldAge);
@@ -269,8 +284,8 @@ bool GenericTaskQueue<E, N>::pop_global(E& t) {
   return resAge == oldAge;
 }

-template<class E>
-GenericTaskQueue<E>::~GenericTaskQueue() {
+template<class E, unsigned int N>
+GenericTaskQueue<E, N>::~GenericTaskQueue() {
   FREE_C_HEAP_ARRAY(E, _elems);
 }
@@ -283,16 +298,18 @@ public:
   virtual bool peek() = 0;
 };

-template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
+template<class T>
+class GenericTaskQueueSet: public TaskQueueSetSuper {
 private:
   uint _n;
-  GenericTaskQueue<E>** _queues;
+  T** _queues;

 public:
+  typedef typename T::element_type E;
+
   GenericTaskQueueSet(int n) : _n(n) {
-    typedef GenericTaskQueue<E>* GenericTaskQueuePtr;
+    typedef T* GenericTaskQueuePtr;
     _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
     guarantee(_queues != NULL, "Allocation failure.");
     for (int i = 0; i < n; i++) {
       _queues[i] = NULL;
     }
@@ -302,9 +319,9 @@ public:
   bool steal_best_of_2(uint queue_num, int* seed, E& t);
   bool steal_best_of_all(uint queue_num, int* seed, E& t);

-  void register_queue(uint i, GenericTaskQueue<E>* q);
+  void register_queue(uint i, T* q);

-  GenericTaskQueue<E>* queue(uint n);
+  T* queue(uint n);

   // The thread with queue number "queue_num" (and whose random number seed
   // is at "seed") is trying to steal a task from some other queue.  (It
@@ -316,27 +333,27 @@ public:
   bool peek();
 };

-template<class E>
-void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) {
+template<class T> void
+GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
   assert(i < _n, "index out of range.");
   _queues[i] = q;
 }

-template<class E>
-GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) {
+template<class T> T*
+GenericTaskQueueSet<T>::queue(uint i) {
   return _queues[i];
 }

-template<class E>
-bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
   for (uint i = 0; i < 2 * _n; i++)
     if (steal_best_of_2(queue_num, seed, t))
       return true;
   return false;
 }

-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     int best_k;
     uint best_sz = 0;
@@ -359,8 +376,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t)
   }
 }

-template<class E>
-bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k = queue_num;
     while (k == queue_num) k = randomParkAndMiller(seed) % _n;
@@ -375,8 +392,8 @@ bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
   }
 }

-template<class E>
-bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+template<class T> bool
+GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   if (_n > 2) {
     uint k1 = queue_num;
     while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
@@ -397,8 +414,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   }
 }

-template<class E>
-bool GenericTaskQueueSet<E>::peek() {
+template<class T>
+bool GenericTaskQueueSet<T>::peek() {
   // Try all the queues.
   for (uint j = 0; j < _n; j++) {
     if (_queues[j]->peek())
@@ -468,14 +485,16 @@ public:
 #endif
 };

-template<class E> inline bool GenericTaskQueue<E>::push(E t) {
+template<class E, unsigned int N> inline bool
+GenericTaskQueue<E, N>::push(E t) {
   uint localBot = _bottom;
   assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
   idx_t top = _age.top();
   uint dirty_n_elems = dirty_size(localBot, top);
-  assert((dirty_n_elems >= 0) && (dirty_n_elems < N), "n_elems out of range.");
+  assert(dirty_n_elems < N, "n_elems out of range.");
   if (dirty_n_elems < max_elems()) {
-    _elems[localBot] = t;
+    // g++ complains if the volatile result of the assignment is unused.
+    const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   } else {
@@ -483,7 +502,8 @@ template<class E> inline bool GenericTaskQueue<E>::push(E t) {
   }
 }

-template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
+template<class E, unsigned int N> inline bool
+GenericTaskQueue<E, N>::pop_local(E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -497,7 +517,7 @@ template<class E, unsigned int N> inline bool GenericTaskQueue<E, N>::pop_local(
   // This is necessary to prevent any read below from being reordered
   // before the store just above.
   OrderAccess::fence();
-  t = _elems[localBot];
+  const_cast<E&>(t = _elems[localBot]);
   // This is a second read of "age"; the "size()" above is the first.
   // If there's still at least one element in the queue, based on the
   // "_bottom" and "age" we've read, then there can be no interference with
@@ -514,17 +534,23 @@ template<class E, unsigned int N> inline bool GenericTaskQueue<E, N>::pop_local(
 }

 typedef oop Task;
-typedef GenericTaskQueue<Task>            OopTaskQueue;
-typedef GenericTaskQueueSet<Task>         OopTaskQueueSet;
+typedef GenericTaskQueue<Task>            OopTaskQueue;
+typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

-#define COMPRESSED_OOP_MASK 1
+#ifdef _MSC_VER
+#pragma warning(push)
+// warning C4522: multiple assignment operators specified
+#pragma warning(disable:4522)
+#endif

 // This is a container class for either an oop* or a narrowOop*.
 // Both are pushed onto a task queue and the consumer will test is_narrow()
 // to determine which should be processed.
 class StarTask {
   void*  _holder;        // either union oop* or narrowOop*
+
+  enum { COMPRESSED_OOP_MASK = 1 };
+
  public:
   StarTask(narrowOop* p) {
     assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
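The const_cast changes above deserve a note: _elems is a volatile E*, so _elems[i] = t assigns through a volatile lvalue, and per the comment carried in the patch, g++ warns when the volatile result of such an assignment goes unused (the same concern is why StarTask and ObjArrayTask carry volatile-qualified assignment operators below, wrapped in a pragma for MSVC's C4522). Casting the result to a non-volatile reference discards it without changing the store. Minimal shape of the idiom (illustrative):

    // Assign through a volatile lvalue, then cast away volatility on the
    // unused result so g++ does not warn about it.
    volatile int g_slot;

    void store(int t) {
      const_cast<int&>(g_slot = t);  // one store; result discarded quietly
    }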
@@ -540,20 +566,61 @@ class StarTask {
     return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
   }

-  // Operators to preserve const/volatile in assignments required by gcc
-  void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
+  StarTask& operator=(const StarTask& t) {
+    _holder = t._holder;
+    return *this;
+  }
+  volatile StarTask& operator=(const volatile StarTask& t) volatile {
+    _holder = t._holder;
+    return *this;
+  }

   bool is_narrow() const {
     return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
   }
 };

-typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
-typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;
+class ObjArrayTask
+{
+public:
+  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
+  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
+    assert(idx <= size_t(max_jint), "too big");
+  }
+  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }
+
+  ObjArrayTask& operator =(const ObjArrayTask& t) {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+  volatile ObjArrayTask&
+  operator =(const volatile ObjArrayTask& t) volatile {
+    _obj = t._obj;
+    _index = t._index;
+    return *this;
+  }
+
+  inline oop obj() const { return _obj; }
+  inline int index() const { return _index; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+private:
+  oop _obj;
+  int _index;
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+typedef GenericTaskQueue<StarTask>            OopStarTaskQueue;
+typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;

 typedef size_t RegionTask;  // index for region
-typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTask>      RegionTaskQueueSet;
+typedef GenericTaskQueue<RegionTask>         RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;

 class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
||||
|
Loading…
x
Reference in New Issue
Block a user