6423256: GC stacks should use a better data structure
6942771: SEGV in ParScanThreadState::take_from_overflow_stack
Reviewed-by: apetrusenko, ysr, pbk
parent aff36499e7
commit 1cdd538ea5
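The recurring theme in the diff below: fields such as _preserved_oop_stack change from lazily allocated GrowableArray<T>* (NULL checks, C-heap new/delete, int-based length()) to by-value Stack<T> members backed by linked fixed-size segments (see the new utilities/stack.hpp at the bottom of this page). What follows is a minimal, self-contained sketch of the segmented-stack idea; the class name, segment size, and one-segment cache policy are illustrative assumptions, not HotSpot's exact implementation.

// Minimal sketch of a segmented stack in the spirit of the new
// utilities/stack.hpp -- NOT HotSpot's code. Segments are fixed-size
// arrays linked together; the most recently emptied segment is cached
// so push/pop churn near a segment boundary stays cheap.
// Copying is intentionally unsupported in this sketch.
#include <cassert>
#include <cstddef>

template <typename E, size_t seg_size = 1024>
class SegmentedStack {
 public:
  SegmentedStack() : _cur(NULL), _cache(NULL), _top(0), _size(0) {}
  ~SegmentedStack() { clear(true); }

  bool is_empty() const { return _size == 0; }
  size_t size() const { return _size; }

  void push(const E& e) {
    if (_cur == NULL || _top == seg_size) push_segment();
    _cur->data[_top++] = e;
    ++_size;
  }

  E pop() {
    assert(!is_empty());
    if (_top == 0) pop_segment();   // current segment is drained
    --_size;
    return _cur->data[--_top];
  }

  // clear(true) also frees the cached segment, mirroring the
  // "clear cached segments" calls this commit adds on cleanup paths.
  void clear(bool clear_cache = false) {
    while (_cur != NULL) { Segment* s = _cur; _cur = s->link; delete s; }
    if (clear_cache) { delete _cache; _cache = NULL; }
    _top = 0;
    _size = 0;
  }

 private:
  struct Segment {
    E data[seg_size];
    Segment* link;   // next-older segment
  };

  void push_segment() {
    Segment* s;
    if (_cache != NULL) { s = _cache; _cache = NULL; }  // reuse cached segment
    else                { s = new Segment(); }
    s->link = _cur;
    _cur = s;
    _top = 0;
  }

  void pop_segment() {
    Segment* s = _cur;
    _cur = s->link;
    delete _cache;     // keep at most one cached segment
    _cache = s;
    _top = seg_size;
  }

  Segment* _cur;    // segment holding the top of the stack
  Segment* _cache;  // most recently emptied segment
  size_t _top;      // next free slot in _cur
  size_t _size;     // total element count
};

The by-value style removes an entire class of bugs visible in the old code below: no "is the stack allocated yet?" checks, no vm_exit_out_of_memory fallbacks for a failed lazy allocation, and no delete/NULL pairs on teardown.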
@@ -540,8 +540,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_is_alive_closure(_span, &_markBitMap),
_restart_addr(NULL),
_overflow_list(NULL),
_preserved_oop_stack(NULL),
_preserved_mark_stack(NULL),
_stats(cmsGen),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_capacity(0), // -- ditto --
@@ -8907,23 +8905,10 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
void CMSCollector::preserve_mark_work(oop p, markOop m) {
if (_preserved_oop_stack == NULL) {
assert(_preserved_mark_stack == NULL,
"bijection with preserved_oop_stack");
// Allocate the stacks
_preserved_oop_stack = new (ResourceObj::C_HEAP)
GrowableArray<oop>(PreserveMarkStackSize, true);
_preserved_mark_stack = new (ResourceObj::C_HEAP)
GrowableArray<markOop>(PreserveMarkStackSize, true);
if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
"Preserved Mark/Oop Stack for CMS (C-heap)");
}
}
_preserved_oop_stack->push(p);
_preserved_mark_stack->push(m);
_preserved_oop_stack.push(p);
_preserved_mark_stack.push(m);
assert(m == p->mark(), "Mark word changed");
assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
"bijection");
}

@@ -8965,42 +8950,30 @@ void CMSCollector::par_preserve_mark_if_necessary(oop p) {
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
if (_preserved_oop_stack == NULL) {
assert(_preserved_mark_stack == NULL,
"bijection with preserved_oop_stack");
return;
}

assert(SafepointSynchronize::is_at_safepoint(),
"world should be stopped");
assert(Thread::current()->is_ConcurrentGC_thread() ||
Thread::current()->is_VM_thread(),
"should be single-threaded");
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
"bijection");

int length = _preserved_oop_stack->length();
assert(_preserved_mark_stack->length() == length, "bijection");
for (int i = 0; i < length; i++) {
oop p = _preserved_oop_stack->at(i);
while (!_preserved_oop_stack.is_empty()) {
oop p = _preserved_oop_stack.pop();
assert(p->is_oop(), "Should be an oop");
assert(_span.contains(p), "oop should be in _span");
assert(p->mark() == markOopDesc::prototype(),
"Set when taken from overflow list");
markOop m = _preserved_mark_stack->at(i);
markOop m = _preserved_mark_stack.pop();
p->set_mark(m);
}
_preserved_mark_stack->clear();
_preserved_oop_stack->clear();
assert(_preserved_mark_stack->is_empty() &&
_preserved_oop_stack->is_empty(),
assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
"stacks were cleared above");
}

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
return ( ( _preserved_mark_stack == NULL
&& _preserved_oop_stack == NULL)
|| ( _preserved_mark_stack->is_empty()
&& _preserved_oop_stack->is_empty()));
return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
}
#endif
@@ -537,8 +537,8 @@ class CMSCollector: public CHeapObj {
// The following array-pair keeps track of mark words
// displaced for accomodating overflow list above.
// This code will likely be revisited under RFE#4922830.
GrowableArray<oop>* _preserved_oop_stack;
GrowableArray<markOop>* _preserved_mark_stack;
Stack<oop> _preserved_oop_stack;
Stack<markOop> _preserved_mark_stack;

int* _hash_seed;

@@ -1691,8 +1691,8 @@ public:
ref = new_ref;
}

int refs_to_scan() { return refs()->size(); }
int overflowed_refs_to_scan() { return refs()->overflow_stack()->length(); }
int refs_to_scan() { return (int)refs()->size(); }
int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }

template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
@@ -101,22 +101,6 @@ void G1MarkSweep::allocate_stacks() {
GenMarkSweep::_preserved_count_max = 0;
GenMarkSweep::_preserved_marks = NULL;
GenMarkSweep::_preserved_count = 0;
GenMarkSweep::_preserved_mark_stack = NULL;
GenMarkSweep::_preserved_oop_stack = NULL;

GenMarkSweep::_marking_stack =
new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
GenMarkSweep::_objarray_stack =
new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

int size = SystemDictionary::number_of_classes() * 2;
GenMarkSweep::_revisit_klass_stack =
new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
// for now until we have a chance to work out a more optimal setting.
GenMarkSweep::_revisit_mdo_stack =
new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);

}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@@ -145,7 +129,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,

// Follow system dictionary roots and unload classes
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
assert(GenMarkSweep::_marking_stack->is_empty(),
assert(GenMarkSweep::_marking_stack.is_empty(),
"stack should be empty by now");

// Follow code cache roots (has to be done after system dictionary,
@@ -157,19 +141,19 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,

// Update subklass/sibling/implementor links of live klasses
GenMarkSweep::follow_weak_klass_links();
assert(GenMarkSweep::_marking_stack->is_empty(),
assert(GenMarkSweep::_marking_stack.is_empty(),
"stack should be empty by now");

// Visit memoized MDO's and clear any unmarked weak refs
GenMarkSweep::follow_mdo_weak_refs();
assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(&GenMarkSweep::is_alive);
StringTable::unlink(&GenMarkSweep::is_alive);

assert(GenMarkSweep::_marking_stack->is_empty(),
assert(GenMarkSweep::_marking_stack.is_empty(),
"stack should be empty by now");
}
@@ -171,6 +171,7 @@ concurrentMarkSweepGeneration.hpp generation.hpp
concurrentMarkSweepGeneration.hpp generationCounters.hpp
concurrentMarkSweepGeneration.hpp memoryService.hpp
concurrentMarkSweepGeneration.hpp mutexLocker.hpp
concurrentMarkSweepGeneration.hpp stack.inline.hpp
concurrentMarkSweepGeneration.hpp taskqueue.hpp
concurrentMarkSweepGeneration.hpp virtualspace.hpp
concurrentMarkSweepGeneration.hpp yieldingWorkgroup.hpp
@@ -187,9 +187,11 @@ psCompactionManager.cpp parMarkBitMap.hpp
psCompactionManager.cpp psParallelCompact.hpp
psCompactionManager.cpp psCompactionManager.hpp
psCompactionManager.cpp psOldGen.hpp
psCompactionManager.cpp stack.inline.hpp
psCompactionManager.cpp systemDictionary.hpp

psCompactionManager.hpp allocation.hpp
psCompactionManager.hpp stack.hpp
psCompactionManager.hpp taskqueue.hpp

psCompactionManager.inline.hpp psCompactionManager.hpp
@@ -233,12 +235,14 @@ psMarkSweep.cpp referencePolicy.hpp
psMarkSweep.cpp referenceProcessor.hpp
psMarkSweep.cpp safepoint.hpp
psMarkSweep.cpp spaceDecorator.hpp
psMarkSweep.cpp stack.inline.hpp
psMarkSweep.cpp symbolTable.hpp
psMarkSweep.cpp systemDictionary.hpp
psMarkSweep.cpp vmThread.hpp

psMarkSweep.hpp markSweep.inline.hpp
psMarkSweep.hpp collectorCounters.hpp
psMarkSweep.hpp stack.hpp

psMarkSweepDecorator.cpp liveRange.hpp
psMarkSweepDecorator.cpp markSweep.inline.hpp
@@ -280,6 +284,7 @@ psParallelCompact.cpp psYoungGen.hpp
psParallelCompact.cpp referencePolicy.hpp
psParallelCompact.cpp referenceProcessor.hpp
psParallelCompact.cpp safepoint.hpp
psParallelCompact.cpp stack.inline.hpp
psParallelCompact.cpp symbolTable.hpp
psParallelCompact.cpp systemDictionary.hpp
psParallelCompact.cpp vmThread.hpp
@@ -367,6 +372,7 @@ psScavenge.cpp referencePolicy.hpp
psScavenge.cpp referenceProcessor.hpp
psScavenge.cpp resourceArea.hpp
psScavenge.cpp spaceDecorator.hpp
psScavenge.cpp stack.inline.hpp
psScavenge.cpp threadCritical.hpp
psScavenge.cpp vmThread.hpp
psScavenge.cpp vm_operations.hpp
@@ -376,6 +382,7 @@ psScavenge.hpp cardTableExtension.hpp
psScavenge.hpp collectorCounters.hpp
psScavenge.hpp oop.hpp
psScavenge.hpp psVirtualspace.hpp
psScavenge.hpp stack.hpp

psScavenge.inline.hpp cardTableExtension.hpp
psScavenge.inline.hpp parallelScavengeHeap.hpp
@@ -93,11 +93,13 @@ markSweep.cpp oop.inline.hpp
markSweep.hpp growableArray.hpp
markSweep.hpp markOop.hpp
markSweep.hpp oop.hpp
markSweep.hpp stack.hpp
markSweep.hpp timer.hpp
markSweep.hpp universe.hpp

markSweep.inline.hpp collectedHeap.hpp
markSweep.inline.hpp markSweep.hpp
markSweep.inline.hpp stack.inline.hpp

mutableSpace.hpp immutableSpace.hpp
mutableSpace.hpp memRegion.hpp
@@ -34,12 +34,12 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
Generation* old_gen_,
int thread_num_,
ObjToScanQueueSet* work_queue_set_,
GrowableArray<oop>** overflow_stack_set_,
Stack<oop>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
_to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_overflow_stack(overflow_stack_set_[thread_num_]),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
_to_space_closure(gen_, this), _old_gen_closure(gen_, this),
@@ -159,11 +159,12 @@ bool ParScanThreadState::take_from_overflow_stack() {
assert(ParGCUseLocalOverflow, "Else should not call");
assert(young_gen()->overflow_list() == NULL, "Error");
ObjToScanQueue* queue = work_queue();
GrowableArray<oop>* of_stack = overflow_stack();
uint num_overflow_elems = of_stack->length();
uint num_take_elems = MIN2(MIN2((queue->max_elems() - queue->size())/4,
(juint)ParGCDesiredObjsFromOverflowList),
num_overflow_elems);
Stack<oop>* const of_stack = overflow_stack();
const size_t num_overflow_elems = of_stack->size();
const size_t space_available = queue->max_elems() - queue->size();
const size_t num_take_elems = MIN3(space_available / 4,
ParGCDesiredObjsFromOverflowList,
num_overflow_elems);
// Transfer the most recent num_take_elems from the overflow
// stack to our work queue.
for (size_t i = 0; i != num_take_elems; i++) {
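A note on the two hunks above (a hedged interpretation; the commit message does not spell this out): the old constructor indexed overflow_stack_set_[thread_num_] unconditionally, while the new one guards a possibly-NULL overflow_stacks_ array, and the transfer size that was computed with mixed int/uint/juint arithmetic is now kept entirely in size_t via MIN3. A self-contained illustration of the MIN3-style clamp follows; min3 is a local stand-in for HotSpot's macro, and all the values are assumptions made up for the example.

#include <cstdio>
#include <cstddef>

// Local stand-in for HotSpot's MIN3 macro.
template <typename T> T min3(T a, T b, T c) {
  const T m = a < b ? a : b;
  return m < c ? m : c;
}

int main() {
  const size_t max_elems  = 1024; // work queue capacity (assumed)
  const size_t queue_size = 100;  // current queue occupancy (assumed)
  const size_t desired    = 20;   // ParGCDesiredObjsFromOverflowList stand-in
  const size_t overflow   = 7;    // elements currently on the overflow stack

  // The whole computation stays in size_t, as in the rewritten code.
  const size_t space_available = max_elems - queue_size;
  const size_t num_take_elems  = min3(space_available / 4, desired, overflow);
  std::printf("take %zu elements\n", num_take_elems); // prints: take 7 elements
  return 0;
}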
@@ -271,7 +272,7 @@ public:
ParNewGeneration& gen,
Generation& old_gen,
ObjToScanQueueSet& queue_set,
GrowableArray<oop>** overflow_stacks_,
Stack<oop>* overflow_stacks_,
size_t desired_plab_sz,
ParallelTaskTerminator& term);

@@ -302,17 +303,19 @@ private:
ParScanThreadStateSet::ParScanThreadStateSet(
int num_threads, Space& to_space, ParNewGeneration& gen,
Generation& old_gen, ObjToScanQueueSet& queue_set,
GrowableArray<oop>** overflow_stack_set_,
Stack<oop>* overflow_stacks,
size_t desired_plab_sz, ParallelTaskTerminator& term)
: ResourceArray(sizeof(ParScanThreadState), num_threads),
_gen(gen), _next_gen(old_gen), _term(term)
{
assert(num_threads > 0, "sanity check!");
assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
"overflow_stack allocation mismatch");
// Initialize states.
for (int i = 0; i < num_threads; ++i) {
new ((ParScanThreadState*)_data + i)
ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
overflow_stack_set_, desired_plab_sz, term);
overflow_stacks, desired_plab_sz, term);
}
}

@@ -596,14 +599,11 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
_task_queues->queue(i2)->initialize();

_overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
for (uint i = 0; i < ParallelGCThreads; i++) {
if (ParGCUseLocalOverflow) {
_overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
} else {
_overflow_stacks[i] = NULL;
_overflow_stacks = NULL;
if (ParGCUseLocalOverflow) {
_overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
for (size_t i = 0; i < ParallelGCThreads; ++i) {
new (_overflow_stacks + i) Stack<oop>();
}
}

@@ -937,12 +937,9 @@ void ParNewGeneration::collect(bool full,
} else {
assert(HandlePromotionFailure,
"Should only be here if promotion failure handling is on");
if (_promo_failure_scan_stack != NULL) {
// Can be non-null because of reference processing.
// Free stack with its elements.
delete _promo_failure_scan_stack;
_promo_failure_scan_stack = NULL;
}
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.

remove_forwarding_pointers();
if (PrintGCDetails) {
gclog_or_tty->print(" (promotion failed)");
@@ -1397,8 +1394,8 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);

assert(par_scan_state->overflow_stack() == NULL, "Error");
assert(!UseCompressedOops, "Error");
assert(par_scan_state->overflow_stack() == NULL, "Error");
if (_overflow_list == NULL) return false;

// Otherwise, there was something there; try claiming the list.

@@ -52,7 +52,7 @@ class ParScanThreadState {
friend class ParScanThreadStateSet;
private:
ObjToScanQueue *_work_queue;
GrowableArray<oop>* _overflow_stack;
Stack<oop>* const _overflow_stack;

ParGCAllocBuffer _to_space_alloc_buffer;

@@ -120,7 +120,7 @@ class ParScanThreadState {
ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
Generation* old_gen_, int thread_num_,
ObjToScanQueueSet* work_queue_set_,
GrowableArray<oop>** overflow_stack_set_,
Stack<oop>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_);

@@ -144,7 +144,7 @@ class ParScanThreadState {
void trim_queues(int max_size);

// Private overflow stack usage
GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
Stack<oop>* overflow_stack() { return _overflow_stack; }
bool take_from_overflow_stack();
void push_on_overflow_stack(oop p);

@@ -301,7 +301,7 @@ class ParNewGeneration: public DefNewGeneration {
ObjToScanQueueSet* _task_queues;

// Per-worker-thread local overflow stacks
GrowableArray<oop>** _overflow_stacks;
Stack<oop>* _overflow_stacks;

// Desired size of survivor space plab's
PLABStats _plab_stats;
@@ -59,8 +59,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

switch (_root_type) {
@@ -119,7 +117,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {

// Do the real work
cm->follow_marking_stacks();
// cm->deallocate_stacks();
}


@@ -135,8 +132,6 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
_rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
@@ -46,23 +46,6 @@ ParCompactionManager::ParCompactionManager() :
marking_stack()->initialize();
_objarray_stack.initialize();
region_stack()->initialize();

// Note that _revisit_klass_stack is allocated out of the
// C heap (as opposed to out of ResourceArena).
int size =
(SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
// have to do for now until we are able to investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}

ParCompactionManager::~ParCompactionManager() {
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
// _manager_array and _stack_array are statics
// shared with all instances of ParCompactionManager
// should not be deallocated.
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
@@ -134,9 +117,9 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
}

void ParCompactionManager::reset() {
for(uint i=0; i<ParallelGCThreads+1; i++) {
manager_array(i)->revisit_klass_stack()->clear();
manager_array(i)->revisit_mdo_stack()->clear();
for(uint i = 0; i < ParallelGCThreads + 1; i++) {
assert(manager_array(i)->revisit_klass_stack()->is_empty(), "sanity");
assert(manager_array(i)->revisit_mdo_stack()->is_empty(), "sanity");
}
}

@@ -178,10 +161,3 @@ void ParCompactionManager::drain_region_stacks() {
}
} while (!region_stack()->is_empty());
}

#ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() {
return (revisit_klass_stack()->data_addr() != NULL &&
revisit_mdo_stack()->data_addr() != NULL);
}
#endif
@@ -80,10 +80,9 @@ private:
// type of TaskQueue.
RegionTaskQueue _region_stack;

#if 1 // does this happen enough to need a per thread stack?
GrowableArray<Klass*>* _revisit_klass_stack;
GrowableArray<DataLayout*>* _revisit_mdo_stack;
#endif
Stack<Klass*> _revisit_klass_stack;
Stack<DataLayout*> _revisit_mdo_stack;

static ParMarkBitMap* _mark_bitmap;

Action _action;
@@ -113,10 +112,7 @@ private:
inline static ParCompactionManager* manager_array(int index);

ParCompactionManager();
~ParCompactionManager();

void allocate_stacks();
void deallocate_stacks();
ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

// Take actions in preparation for a compaction.
@@ -129,11 +125,8 @@ private:
bool should_verify_only();
bool should_reset_only();

#if 1
// Probably stays as a growable array
GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
#endif
Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

// Save for later processing. Must not fail.
inline void push(oop obj) { _marking_stack.push(obj); }
@@ -162,10 +155,6 @@ private:
// Process tasks remaining on any stack
void drain_region_stacks();

// Debugging support
#ifdef ASSERT
bool stacks_have_been_allocated();
#endif
};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
@@ -466,33 +466,16 @@ void PSMarkSweep::allocate_stacks() {
_preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
// Now divide by the size of a PreservedMark
_preserved_count_max /= sizeof(PreservedMark);

_preserved_mark_stack = NULL;
_preserved_oop_stack = NULL;

_marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
_objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
// now until we investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}


void PSMarkSweep::deallocate_stacks() {
if (_preserved_oop_stack) {
delete _preserved_mark_stack;
_preserved_mark_stack = NULL;
delete _preserved_oop_stack;
_preserved_oop_stack = NULL;
}

delete _marking_stack;
delete _objarray_stack;
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
_marking_stack.clear();
_objarray_stack.clear(true);
_revisit_klass_stack.clear(true);
_revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@@ -542,17 +525,17 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {

// Update subklass/sibling/implementor links of live klasses
follow_weak_klass_links();
assert(_marking_stack->is_empty(), "just drained");
assert(_marking_stack.is_empty(), "just drained");

// Visit memoized mdo's and clear unmarked weak refs
follow_mdo_weak_refs();
assert(_marking_stack->is_empty(), "just drained");
assert(_marking_stack.is_empty(), "just drained");

// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(is_alive_closure());
StringTable::unlink(is_alive_closure());

assert(_marking_stack->is_empty(), "stack should be empty by now");
assert(_marking_stack.is_empty(), "stack should be empty by now");
}


@@ -2170,6 +2170,16 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
heap->update_counters();
}

#ifdef ASSERT
for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
ParCompactionManager* const cm =
ParCompactionManager::manager_array(int(i));
assert(cm->marking_stack()->is_empty(), "should be empty");
assert(cm->region_stack()->is_empty(), "should be empty");
assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
}
#endif // ASSERT

if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
@@ -2711,21 +2721,22 @@ PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("#classes in system dictionary = %d",
SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i);
KeepAliveClosure keep_alive_closure(cm);
int length = cm->revisit_klass_stack()->length();
Stack<Klass*>* const rks = cm->revisit_klass_stack();
if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
i, rks->size());
}
for (int j = 0; j < length; j++) {
cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
is_alive_closure(),
&keep_alive_closure);
while (!rks->is_empty()) {
Klass* const k = rks->pop();
k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
}
// revisit_klass_stack is cleared in reset()

cm->follow_marking_stacks();
}
}
@@ -2744,19 +2755,20 @@ void PSParallelCompact::follow_mdo_weak_refs() {
// we can visit and clear any weak references from MDO's which
// we memoized during the strong marking phase.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("#classes in system dictionary = %d",
SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i);
GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
int length = rms->length();
Stack<DataLayout*>* rms = cm->revisit_mdo_stack();
if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
i, rms->size());
}
for (int j = 0; j < length; j++) {
rms->at(j)->follow_weak_refs(is_alive_closure());
while (!rms->is_empty()) {
rms->pop()->follow_weak_refs(is_alive_closure());
}
// revisit_mdo_stack is cleared in reset()

cm->follow_marking_stacks();
}
}
@@ -185,7 +185,6 @@ void PSPromotionManager::reset() {


void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
@@ -34,9 +34,10 @@ bool PSScavenge::_survivor_overflow = false;
int PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
elapsedTimer PSScavenge::_accumulated_time;
GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>* PSScavenge::_preserved_oop_stack = NULL;
Stack<markOop> PSScavenge::_preserved_mark_stack;
Stack<oop> PSScavenge::_preserved_oop_stack;
CollectorCounters* PSScavenge::_counters = NULL;
bool PSScavenge::_promotion_failed = false;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
@@ -223,6 +224,9 @@ bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

assert(_preserved_mark_stack.is_empty(), "should be empty");
assert(_preserved_oop_stack.is_empty(), "should be empty");

TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
@@ -636,24 +640,20 @@ void PSScavenge::clean_up_failed_promotion() {
young_gen->object_iterate(&unforward_closure);

if (PrintGC && Verbose) {
gclog_or_tty->print_cr("Restoring %d marks",
_preserved_oop_stack->length());
gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
}

// Restore any saved marks.
for (int i=0; i < _preserved_oop_stack->length(); i++) {
oop obj = _preserved_oop_stack->at(i);
markOop mark = _preserved_mark_stack->at(i);
while (!_preserved_oop_stack.is_empty()) {
oop obj = _preserved_oop_stack.pop();
markOop mark = _preserved_mark_stack.pop();
obj->set_mark(mark);
}

// Deallocate the preserved mark and oop stacks.
// The stacks were allocated as CHeap objects, so
// we must call delete to prevent mem leaks.
delete _preserved_mark_stack;
_preserved_mark_stack = NULL;
delete _preserved_oop_stack;
_preserved_oop_stack = NULL;
// Clear the preserved mark and oop stack caches.
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
_promotion_failed = false;
}

// Reset the PromotionFailureALot counters.
@@ -661,27 +661,16 @@ void PSScavenge::clean_up_failed_promotion() {
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not neccessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
if (_preserved_mark_stack == NULL) {
ThreadCritical tc; // Lock and retest
if (_preserved_mark_stack == NULL) {
assert(_preserved_oop_stack == NULL, "Sanity");
_preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
_preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
}
}

// Because we must hold the ThreadCritical lock before using
// the stacks, we should be safe from observing partial allocations,
// which are also guarded by the ThreadCritical lock.
_promotion_failed = true;
if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
ThreadCritical tc;
_preserved_oop_stack->push(obj);
_preserved_mark_stack->push(obj_mark);
_preserved_oop_stack.push(obj);
_preserved_mark_stack.push(obj_mark);
}
}

@@ -61,9 +61,10 @@ class PSScavenge: AllStatic {
static HeapWord* _young_generation_boundary; // The lowest address possible for the young_gen.
// This is used to decide if an oop should be scavenged,
// cards should be marked, etc.
static GrowableArray<markOop>* _preserved_mark_stack; // List of marks to be restored after failed promotion
static GrowableArray<oop>* _preserved_oop_stack; // List of oops that need their mark restored.
static Stack<markOop> _preserved_mark_stack; // List of marks to be restored after failed promotion
static Stack<oop> _preserved_oop_stack; // List of oops that need their mark restored.
static CollectorCounters* _counters; // collector performance counters
static bool _promotion_failed;

static void clean_up_failed_promotion();

@@ -79,8 +80,7 @@ class PSScavenge: AllStatic {
// Accessors
static int tenuring_threshold() { return _tenuring_threshold; }
static elapsedTimer* accumulated_time() { return &_accumulated_time; }
static bool promotion_failed()
{ return _preserved_mark_stack != NULL; }
static bool promotion_failed() { return _promotion_failed; }
static int consecutive_skipped_scavenges()
{ return _consecutive_skipped_scavenges; }
@@ -25,13 +25,13 @@
#include "incls/_precompiled.incl"
#include "incls/_markSweep.cpp.incl"

GrowableArray<oop>* MarkSweep::_marking_stack = NULL;
GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack = NULL;
GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL;
GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL;
Stack<oop> MarkSweep::_marking_stack;
Stack<DataLayout*> MarkSweep::_revisit_mdo_stack;
Stack<Klass*> MarkSweep::_revisit_klass_stack;
Stack<ObjArrayTask> MarkSweep::_objarray_stack;

GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL;
GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
Stack<oop> MarkSweep::_preserved_oop_stack;
Stack<markOop> MarkSweep::_preserved_mark_stack;
size_t MarkSweep::_preserved_count = 0;
size_t MarkSweep::_preserved_count_max = 0;
PreservedMark* MarkSweep::_preserved_marks = NULL;
@@ -58,37 +58,42 @@ GrowableArray<size_t> * MarkSweep::_last_gc_live_oops_size = NULL;
#endif

void MarkSweep::revisit_weak_klass_link(Klass* k) {
_revisit_klass_stack->push(k);
_revisit_klass_stack.push(k);
}

void MarkSweep::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
gclog_or_tty->print_cr("#classes in system dictionary = %d",
SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit klass stack size = " SIZE_FORMAT,
_revisit_klass_stack.size());
}
for (int i = 0; i < _revisit_klass_stack->length(); i++) {
_revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
while (!_revisit_klass_stack.is_empty()) {
Klass* const k = _revisit_klass_stack.pop();
k->follow_weak_klass_links(&is_alive, &keep_alive);
}
follow_stack();
}

void MarkSweep::revisit_mdo(DataLayout* p) {
_revisit_mdo_stack->push(p);
_revisit_mdo_stack.push(p);
}

void MarkSweep::follow_mdo_weak_refs() {
// All strongly reachable oops have been marked at this point;
// we can visit and clear any weak references from MDO's which
// we memoized during the strong marking phase.
assert(_marking_stack->is_empty(), "Marking stack should be empty");
assert(_marking_stack.is_empty(), "Marking stack should be empty");
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
gclog_or_tty->print_cr("#classes in system dictionary = %d",
SystemDictionary::number_of_classes());
gclog_or_tty->print_cr("Revisit MDO stack size = " SIZE_FORMAT,
_revisit_mdo_stack.size());
}
for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
_revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
while (!_revisit_mdo_stack.is_empty()) {
_revisit_mdo_stack.pop()->follow_weak_refs(&is_alive);
}
follow_stack();
}
@@ -106,41 +111,37 @@ void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }

void MarkSweep::follow_stack() {
do {
while (!_marking_stack->is_empty()) {
oop obj = _marking_stack->pop();
while (!_marking_stack.is_empty()) {
oop obj = _marking_stack.pop();
assert (obj->is_gc_marked(), "p must be marked");
obj->follow_contents();
}
// Process ObjArrays one at a time to avoid marking stack bloat.
if (!_objarray_stack->is_empty()) {
ObjArrayTask task = _objarray_stack->pop();
if (!_objarray_stack.is_empty()) {
ObjArrayTask task = _objarray_stack.pop();
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(task.obj(), task.index());
}
} while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
} while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;

void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }

// We preserve the mark which should be replaced at the end and the location that it
// will go. Note that the object that this markOop belongs to isn't currently at that
// address but it will be after phase4
// We preserve the mark which should be replaced at the end and the location
// that it will go. Note that the object that this markOop belongs to isn't
// currently at that address but it will be after phase4
void MarkSweep::preserve_mark(oop obj, markOop mark) {
// we try to store preserved marks in the to space of the new generation since this
// is storage which should be available. Most of the time this should be sufficient
// space for the marks we need to preserve but if it isn't we fall back in using
// GrowableArrays to keep track of the overflow.
// We try to store preserved marks in the to space of the new generation since
// this is storage which should be available. Most of the time this should be
// sufficient space for the marks we need to preserve but if it isn't we fall
// back to using Stacks to keep track of the overflow.
if (_preserved_count < _preserved_count_max) {
_preserved_marks[_preserved_count++].init(obj, mark);
} else {
if (_preserved_mark_stack == NULL) {
_preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
_preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
}
_preserved_mark_stack->push(mark);
_preserved_oop_stack->push(obj);
_preserved_mark_stack.push(mark);
_preserved_oop_stack.push(obj);
}
}

@@ -151,8 +152,7 @@ void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

void MarkSweep::adjust_marks() {
assert(_preserved_oop_stack == NULL ||
_preserved_oop_stack->length() == _preserved_mark_stack->length(),
assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
"inconsistent preserved oop stacks");

// adjust the oops we saved earlier
@@ -161,21 +161,19 @@ void MarkSweep::adjust_marks() {
}

// deal with the overflow stack
if (_preserved_oop_stack) {
for (int i = 0; i < _preserved_oop_stack->length(); i++) {
oop* p = _preserved_oop_stack->adr_at(i);
adjust_pointer(p);
}
StackIterator<oop> iter(_preserved_oop_stack);
while (!iter.is_empty()) {
oop* p = iter.next_addr();
adjust_pointer(p);
}
}

void MarkSweep::restore_marks() {
assert(_preserved_oop_stack == NULL ||
_preserved_oop_stack->length() == _preserved_mark_stack->length(),
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
"inconsistent preserved oop stacks");
if (PrintGC && Verbose) {
gclog_or_tty->print_cr("Restoring %d marks", _preserved_count +
(_preserved_oop_stack ? _preserved_oop_stack->length() : 0));
gclog_or_tty->print_cr("Restoring %d marks",
_preserved_count + _preserved_oop_stack.size());
}

// restore the marks we saved earlier
@@ -184,12 +182,10 @@ void MarkSweep::restore_marks() {
}

// deal with the overflow
if (_preserved_oop_stack) {
for (int i = 0; i < _preserved_oop_stack->length(); i++) {
oop obj = _preserved_oop_stack->at(i);
markOop mark = _preserved_mark_stack->at(i);
obj->set_mark(mark);
}
while (!_preserved_oop_stack.is_empty()) {
oop obj = _preserved_oop_stack.pop();
markOop mark = _preserved_mark_stack.pop();
obj->set_mark(mark);
}
}

@@ -104,23 +104,22 @@ class MarkSweep : AllStatic {
friend class KeepAliveClosure;
friend class VM_MarkSweep;
friend void marksweep_init();
friend class DataLayout;

//
// Vars
//
protected:
// Traversal stacks used during phase1
static GrowableArray<oop>* _marking_stack;
static GrowableArray<ObjArrayTask>* _objarray_stack;
static Stack<oop> _marking_stack;
static Stack<ObjArrayTask> _objarray_stack;
// Stack for live klasses to revisit at end of marking phase
static GrowableArray<Klass*>* _revisit_klass_stack;
static Stack<Klass*> _revisit_klass_stack;
// Set (stack) of MDO's to revisit at end of marking phase
static GrowableArray<DataLayout*>* _revisit_mdo_stack;
static Stack<DataLayout*> _revisit_mdo_stack;

// Space for storing/restoring mark word
static GrowableArray<markOop>* _preserved_mark_stack;
static GrowableArray<oop>* _preserved_oop_stack;
static Stack<markOop> _preserved_mark_stack;
static Stack<oop> _preserved_oop_stack;
static size_t _preserved_count;
static size_t _preserved_count_max;
static PreservedMark* _preserved_marks;
@@ -72,7 +72,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
_marking_stack->push(obj);
_marking_stack.push(obj);
}
}
}
@@ -80,7 +80,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
void MarkSweep::push_objarray(oop obj, size_t index) {
ObjArrayTask task(obj, index);
assert(task.is_valid(), "bad ObjArrayTask");
_objarray_stack->push(task);
_objarray_stack.push(task);
}

template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
@@ -1435,12 +1435,14 @@ defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp
defNewGeneration.cpp space.inline.hpp
defNewGeneration.cpp spaceDecorator.hpp
defNewGeneration.cpp stack.inline.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp

defNewGeneration.hpp ageTable.hpp
defNewGeneration.hpp cSpaceCounters.hpp
defNewGeneration.hpp generation.inline.hpp
defNewGeneration.hpp generationCounters.hpp
defNewGeneration.hpp stack.hpp

defNewGeneration.inline.hpp cardTableRS.hpp
defNewGeneration.inline.hpp defNewGeneration.hpp
@@ -3852,6 +3854,10 @@ specialized_oop_closures.cpp specialized_oop_closures.hpp

specialized_oop_closures.hpp atomic.hpp

stack.hpp allocation.inline.hpp

stack.inline.hpp stack.hpp

stackMapFrame.cpp globalDefinitions.hpp
stackMapFrame.cpp handles.inline.hpp
stackMapFrame.cpp oop.inline.hpp
@@ -4095,6 +4101,7 @@ task.hpp top.hpp
taskqueue.cpp debug.hpp
taskqueue.cpp oop.inline.hpp
taskqueue.cpp os.hpp
taskqueue.cpp stack.inline.hpp
taskqueue.cpp taskqueue.hpp
taskqueue.cpp thread_<os_family>.inline.hpp

@@ -4102,6 +4109,7 @@ taskqueue.hpp allocation.hpp
taskqueue.hpp allocation.inline.hpp
taskqueue.hpp mutex.hpp
taskqueue.hpp orderAccess_<os_arch>.inline.hpp
taskqueue.hpp stack.hpp

templateInterpreter.cpp interpreter.hpp
templateInterpreter.cpp interpreterGenerator.hpp
@@ -289,16 +289,17 @@ private:

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size)\
(type*) arena->Amalloc((size) * sizeof(type))
#define NEW_ARENA_ARRAY(arena, type, size) \
(type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)\
(type*) arena->Arealloc((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size) \
(type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
(new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size)\
arena->Afree((char*)(old), (size) * sizeof(type))
#define FREE_ARENA_ARRAY(arena, type, old, size) \
(arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type)\
#define NEW_ARENA_OBJ(arena, type) \
NEW_ARENA_ARRAY(arena, type, 1)
@@ -87,9 +87,7 @@ void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
_gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
_scan_older);
} while (!_gch->no_allocs_since_save_marks(_level));
guarantee(_gen->promo_failure_scan_stack() == NULL
|| _gen->promo_failure_scan_stack()->length() == 0,
"Failed to finish scan");
guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -130,9 +128,6 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
int level,
const char* policy)
: Generation(rs, initial_size, level),
_objs_with_preserved_marks(NULL),
_preserved_marks_of_objs(NULL),
_promo_failure_scan_stack(NULL),
_promo_failure_drain_in_progress(false),
_should_allocate_from_space(false)
{
@@ -604,12 +599,8 @@ void DefNewGeneration::collect(bool full,
} else {
assert(HandlePromotionFailure,
"Should not be here unless promotion failure handling is on");
assert(_promo_failure_scan_stack != NULL &&
_promo_failure_scan_stack->length() == 0, "post condition");

// deallocate stack and it's elements
delete _promo_failure_scan_stack;
_promo_failure_scan_stack = NULL;
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.

remove_forwarding_pointers();
if (PrintGCDetails) {
@@ -620,7 +611,7 @@ void DefNewGeneration::collect(bool full,
// case there can be live objects in to-space
// as a result of a partial evacuation of eden
// and from-space.
swap_spaces(); // For the sake of uniformity wrt ParNewGeneration::collect().
swap_spaces(); // For uniformity wrt ParNewGeneration.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();

@@ -653,34 +644,23 @@ void DefNewGeneration::remove_forwarding_pointers() {
RemoveForwardPointerClosure rspc;
eden()->object_iterate(&rspc);
from()->object_iterate(&rspc);

// Now restore saved marks, if any.
if (_objs_with_preserved_marks != NULL) {
assert(_preserved_marks_of_objs != NULL, "Both or none.");
assert(_objs_with_preserved_marks->length() ==
_preserved_marks_of_objs->length(), "Both or none.");
for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
oop obj = _objs_with_preserved_marks->at(i);
markOop m = _preserved_marks_of_objs->at(i);
obj->set_mark(m);
}
delete _objs_with_preserved_marks;
delete _preserved_marks_of_objs;
_objs_with_preserved_marks = NULL;
_preserved_marks_of_objs = NULL;
assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
"should be the same");
while (!_objs_with_preserved_marks.is_empty()) {
oop obj = _objs_with_preserved_marks.pop();
markOop m = _preserved_marks_of_objs.pop();
obj->set_mark(m);
}
_objs_with_preserved_marks.clear(true);
_preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
if (m->must_be_preserved_for_promotion_failure(obj)) {
if (_objs_with_preserved_marks == NULL) {
assert(_preserved_marks_of_objs == NULL, "Both or none.");
_objs_with_preserved_marks = new (ResourceObj::C_HEAP)
GrowableArray<oop>(PreserveMarkStackSize, true);
_preserved_marks_of_objs = new (ResourceObj::C_HEAP)
GrowableArray<markOop>(PreserveMarkStackSize, true);
}
_objs_with_preserved_marks->push(obj);
_preserved_marks_of_objs->push(m);
_objs_with_preserved_marks.push(obj);
_preserved_marks_of_objs.push(m);
}
}

@@ -695,7 +675,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
old->forward_to(old);
_promotion_failed = true;

push_on_promo_failure_scan_stack(old);
_promo_failure_scan_stack.push(old);

if (!_promo_failure_drain_in_progress) {
// prevent recursion in copy_to_survivor_space()
@@ -748,20 +728,9 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
if (_promo_failure_scan_stack == NULL) {
_promo_failure_scan_stack = new (ResourceObj::C_HEAP)
GrowableArray<oop>(40, true);
}

_promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
assert(_promo_failure_scan_stack != NULL, "precondition");

while (_promo_failure_scan_stack->length() > 0) {
oop obj = _promo_failure_scan_stack->pop();
while (!_promo_failure_scan_stack.is_empty()) {
oop obj = _promo_failure_scan_stack.pop();
obj->oop_iterate(_promo_failure_scan_stack_closure);
}
}
@@ -77,10 +77,10 @@ protected:
// word being overwritten with a self-forwarding-pointer.
void preserve_mark_if_necessary(oop obj, markOop m);

// When one is non-null, so is the other. Together, they each pair is
// an object with a preserved mark, and its mark value.
GrowableArray<oop>* _objs_with_preserved_marks;
GrowableArray<markOop>* _preserved_marks_of_objs;
// Together, these keep <object with a preserved mark, mark value> pairs.
// They should always contain the same number of elements.
Stack<oop> _objs_with_preserved_marks;
Stack<markOop> _preserved_marks_of_objs;

// Returns true if the collection can be safely attempted.
// If this method returns false, a collection is not
@@ -94,11 +94,7 @@ protected:
_promo_failure_scan_stack_closure = scan_stack_closure;
}

GrowableArray<oop>* _promo_failure_scan_stack;
GrowableArray<oop>* promo_failure_scan_stack() const {
return _promo_failure_scan_stack;
}
void push_on_promo_failure_scan_stack(oop);
Stack<oop> _promo_failure_scan_stack;
void drain_promo_failure_scan_stack(void);
bool _promo_failure_drain_in_progress;

@@ -184,8 +180,6 @@ protected:
void do_void();
};

class FastEvacuateFollowersClosure;
friend class FastEvacuateFollowersClosure;
class FastEvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch;
int _level;
@@ -336,6 +330,10 @@ protected:

void verify(bool allow_dirty);

bool promo_failure_scan_is_complete() const {
return _promo_failure_scan_stack.is_empty();
}

protected:
// If clear_space is true, clear the survivor spaces. Eden is
// cleared if the minimum size of eden is 0. If mangle_space
@ -161,17 +161,6 @@ void GenMarkSweep::allocate_stacks() {

_preserved_marks = (PreservedMark*)scratch;
_preserved_count = 0;
_preserved_mark_stack = NULL;
_preserved_oop_stack = NULL;

_marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
_objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do for
// now until we have had a chance to investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);

#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
@ -206,17 +195,12 @@ void GenMarkSweep::deallocate_stacks() {
gch->release_scratch();
}

if (_preserved_oop_stack) {
delete _preserved_mark_stack;
_preserved_mark_stack = NULL;
delete _preserved_oop_stack;
_preserved_oop_stack = NULL;
}

delete _marking_stack;
delete _objarray_stack;
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
_marking_stack.clear();
_objarray_stack.clear(true);
_revisit_klass_stack.clear(true);
_revisit_mdo_stack.clear(true);

#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
@ -274,17 +258,17 @@ void GenMarkSweep::mark_sweep_phase1(int level,

// Update subklass/sibling/implementor links of live klasses
follow_weak_klass_links();
assert(_marking_stack->is_empty(), "just drained");
assert(_marking_stack.is_empty(), "just drained");

// Visit memoized MDO's and clear any unmarked weak refs
follow_mdo_weak_refs();
assert(_marking_stack->is_empty(), "just drained");
assert(_marking_stack.is_empty(), "just drained");

// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(&is_alive);
StringTable::unlink(&is_alive);

assert(_marking_stack->is_empty(), "stack should be empty by now");
assert(_marking_stack.is_empty(), "stack should be empty by now");
}
@ -641,6 +641,9 @@ class CommandLineFlags {
develop(bool, ZapJNIHandleArea, trueInDebug, \
"Zap freed JNI handle space with 0xFEFEFEFE") \
\
notproduct(bool, ZapStackSegments, trueInDebug, \
"Zap allocated/freed Stack segments with 0xFADFADED") \
\
develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \
\
@ -1073,6 +1073,7 @@ void WatcherThread::run() {
}
}

#if 0
if (is_error_reported()) {
// A fatal error has happened, the error handler (VMError::report_and_die)
// should abort JVM after creating an error log file. However in some
@ -1100,6 +1101,7 @@ void WatcherThread::run() {
os::sleep(this, 5 * 1000, false);
}
}
#endif // #if 0

PeriodicTask::real_time_tick(time_to_wait);
204 hotspot/src/share/vm/utilities/stack.hpp Normal file
@ -0,0 +1,204 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

// Class Stack (below) grows and shrinks by linking together "segments" which
// are allocated on demand. Segments are arrays of the element type (E) plus an
// extra pointer-sized field to store the segment link. Recently emptied
// segments are kept in a cache and reused.
//
// Notes/caveats:
//
// The size of an element must either evenly divide the size of a pointer or be
// a multiple of the size of a pointer.
//
// Destructors are not called for elements popped off the stack, so element
// types which rely on destructors for things like reference counting will not
// work properly.
//
// Class Stack allocates segments from the C heap. However, two protected
// virtual methods are used to alloc/free memory which subclasses can override:
//
// virtual void* alloc(size_t bytes);
// virtual void free(void* addr, size_t bytes);
//
// The alloc() method must return storage aligned for any use. The
// implementation in class Stack assumes that alloc() will terminate the process
// if the allocation fails.
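//
// For illustration, a minimal usage sketch of the API declared below
// (assuming a HotSpot build and an oop element type; 'obj' is a placeholder):
//
//   Stack<oop> s;                // default segment size, unbounded depth
//   s.push(obj);                 // may allocate or reuse a cached segment
//   while (!s.is_empty()) {
//     oop o = s.pop();           // no destructor runs for popped elements
//   }
//   s.clear(true);               // release all segments and the cache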

template <class E> class StackIterator;

// StackBase holds common data/methods that don't depend on the element type,
// factored out to reduce template code duplication.
class StackBase
{
public:
size_t segment_size() const { return _seg_size; } // Elements per segment.
size_t max_size() const { return _max_size; } // Max elements allowed.
size_t max_cache_size() const { return _max_cache_size; } // Max segments
// allowed in cache.

size_t cache_size() const { return _cache_size; } // Segments in the cache.

protected:
// The ctor arguments correspond to the like-named functions above.
// segment_size: number of items per segment
// max_cache_size: maximum number of *segments* to cache
// max_size: maximum number of items allowed, rounded to a multiple of
// the segment size (0 == unlimited)
inline StackBase(size_t segment_size, size_t max_cache_size, size_t max_size);

// Round max_size to a multiple of the segment size. Treat 0 as unlimited.
static inline size_t adjust_max_size(size_t max_size, size_t seg_size);

protected:
const size_t _seg_size; // Number of items per segment.
const size_t _max_size; // Maximum number of items allowed in the stack.
const size_t _max_cache_size; // Maximum number of segments to cache.
size_t _cur_seg_size; // Number of items in the current segment.
size_t _full_seg_size; // Number of items in already-filled segments.
size_t _cache_size; // Number of segments in the cache.
};

#ifdef __GNUC__
#define inline
#endif // __GNUC__

template <class E>
class Stack: public StackBase
{
public:
friend class StackIterator<E>;

// segment_size: number of items per segment
// max_cache_size: maximum number of *segments* to cache
// max_size: maximum number of items allowed, rounded to a multiple of
// the segment size (0 == unlimited)
inline Stack(size_t segment_size = default_segment_size(),
size_t max_cache_size = 4, size_t max_size = 0);
inline ~Stack() { clear(true); }

inline bool is_empty() const { return _cur_seg == NULL; }
inline bool is_full() const { return _full_seg_size >= max_size(); }

// Performance sensitive code should use is_empty() instead of size() == 0 and
// is_full() instead of size() == max_size(). Using a conditional here allows
// just one var to be updated when pushing/popping elements instead of two;
// _full_seg_size is updated only when pushing/popping segments.
inline size_t size() const {
return is_empty() ? 0 : _full_seg_size + _cur_seg_size;
}

inline void push(E elem);
inline E pop();

// Clear everything from the stack, releasing the associated memory. If
// clear_cache is true, also release any cached segments.
void clear(bool clear_cache = false);

static inline size_t default_segment_size();

protected:
// Each segment includes space for _seg_size elements followed by a link
// (pointer) to the previous segment; the space is allocated as a single block
// of size segment_bytes(). _seg_size is rounded up if necessary so the link
// is properly aligned. The C struct for the layout would be:
//
// struct segment {
// E elements[_seg_size];
// E* link;
// };
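//
// For example (assuming a 64-bit build): with sizeof(E) == 8 and
// _seg_size == 510, the elements occupy 4080 bytes, link_offset() is 4080,
// and segment_bytes() is 4080 + 8 == 4088.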

// Round up seg_size to keep the link field aligned.
static inline size_t adjust_segment_size(size_t seg_size);

// Methods for allocation size and getting/setting the link.
inline size_t link_offset() const; // Byte offset of link field.
inline size_t segment_bytes() const; // Segment size in bytes.
inline E** link_addr(E* seg) const; // Address of the link field.
inline E* get_link(E* seg) const; // Extract the link from seg.
inline E* set_link(E* new_seg, E* old_seg); // new_seg.link = old_seg.

virtual E* alloc(size_t bytes);
virtual void free(E* addr, size_t bytes);

void push_segment();
void pop_segment();

void free_segments(E* seg); // Free all segments in the list.
inline void reset(bool reset_cache); // Reset all data fields.

DEBUG_ONLY(void verify(bool at_empty_transition) const;)
DEBUG_ONLY(void zap_segment(E* seg, bool zap_link_field) const;)

private:
E* _cur_seg; // Current segment.
E* _cache; // Segment cache to avoid ping-ponging.
};

template <class E> class ResourceStack: public Stack<E>, public ResourceObj
{
public:
// If this class becomes widely used, it may make sense to save the Thread
// and use it when allocating segments.
ResourceStack(size_t segment_size = Stack<E>::default_segment_size()):
Stack<E>(segment_size, max_uintx)
{ }

// Set the segment pointers to NULL so the parent dtor does not free them;
// that must be done by the ResourceMark code.
~ResourceStack() { Stack<E>::reset(true); }

protected:
virtual E* alloc(size_t bytes);
virtual void free(E* addr, size_t bytes);

private:
void clear(bool clear_cache = false);
};

template <class E>
class StackIterator: public StackObj
{
public:
StackIterator(Stack<E>& stack): _stack(stack) { sync(); }

Stack<E>& stack() const { return _stack; }

bool is_empty() const { return _cur_seg == NULL; }

E next() { return *next_addr(); }
E* next_addr();

void sync(); // Sync the iterator's state to the stack's current state.

private:
Stack<E>& _stack;
size_t _cur_seg_size;
E* _cur_seg;
size_t _full_seg_size;
};

#ifdef __GNUC__
#undef inline
#endif // __GNUC__
273 hotspot/src/share/vm/utilities/stack.inline.hpp Normal file
@ -0,0 +1,273 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

StackBase::StackBase(size_t segment_size, size_t max_cache_size,
size_t max_size):
_seg_size(segment_size),
_max_cache_size(max_cache_size),
_max_size(adjust_max_size(max_size, segment_size))
{
assert(_max_size % _seg_size == 0, "not a multiple");
}

size_t StackBase::adjust_max_size(size_t max_size, size_t seg_size)
{
assert(seg_size > 0, "cannot be 0");
assert(max_size >= seg_size || max_size == 0, "max_size too small");
const size_t limit = max_uintx - (seg_size - 1);
if (max_size == 0 || max_size > limit) {
max_size = limit;
}
return (max_size + seg_size - 1) / seg_size * seg_size;
}
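
// For example: adjust_max_size(250, 100) rounds up to 300, while
// adjust_max_size(0, 100) treats 0 as unlimited and returns max_uintx
// rounded down to a multiple of 100; computing the limit first keeps the
// round-up from overflowing.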

template <class E>
Stack<E>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
StackBase(adjust_segment_size(segment_size), max_cache_size, max_size)
{
reset(true);
}

template <class E>
void Stack<E>::push(E item)
{
assert(!is_full(), "pushing onto a full stack");
if (_cur_seg_size == _seg_size) {
push_segment();
}
_cur_seg[_cur_seg_size] = item;
++_cur_seg_size;
}

template <class E>
E Stack<E>::pop()
{
assert(!is_empty(), "popping from an empty stack");
if (_cur_seg_size == 1) {
E tmp = _cur_seg[--_cur_seg_size];
pop_segment();
return tmp;
}
return _cur_seg[--_cur_seg_size];
}

template <class E>
void Stack<E>::clear(bool clear_cache)
{
free_segments(_cur_seg);
if (clear_cache) free_segments(_cache);
reset(clear_cache);
}

template <class E>
size_t Stack<E>::default_segment_size()
{
// Number of elements that fit in 4K bytes minus the size of two pointers
// (link field and malloc header).
return (4096 - 2 * sizeof(E*)) / sizeof(E);
}
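
// For example (assuming a 64-bit build): for an 8-byte element type this is
// (4096 - 2 * 8) / 8 == 510 elements per segment.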

template <class E>
size_t Stack<E>::adjust_segment_size(size_t seg_size)
{
const size_t elem_sz = sizeof(E);
const size_t ptr_sz = sizeof(E*);
assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
if (elem_sz < ptr_sz) {
return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
}
return seg_size;
}
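
// For example (assuming 8-byte pointers): with sizeof(E) == 2, a requested
// seg_size of 510 covers 1020 bytes; aligning up to 1024 bytes yields an
// adjusted seg_size of 512, so the link field falls on a pointer boundary.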

template <class E>
size_t Stack<E>::link_offset() const
{
return align_size_up(_seg_size * sizeof(E), sizeof(E*));
}

template <class E>
size_t Stack<E>::segment_bytes() const
{
return link_offset() + sizeof(E*);
}

template <class E>
E** Stack<E>::link_addr(E* seg) const
{
return (E**) ((char*)seg + link_offset());
}

template <class E>
E* Stack<E>::get_link(E* seg) const
{
return *link_addr(seg);
}

template <class E>
E* Stack<E>::set_link(E* new_seg, E* old_seg)
{
*link_addr(new_seg) = old_seg;
return new_seg;
}

template <class E>
E* Stack<E>::alloc(size_t bytes)
{
return (E*) NEW_C_HEAP_ARRAY(char, bytes);
}

template <class E>
void Stack<E>::free(E* addr, size_t bytes)
{
FREE_C_HEAP_ARRAY(char, (char*) addr);
}

template <class E>
void Stack<E>::push_segment()
{
assert(_cur_seg_size == _seg_size, "current segment is not full");
E* next;
if (_cache_size > 0) {
// Use a cached segment.
next = _cache;
_cache = get_link(_cache);
--_cache_size;
} else {
next = alloc(segment_bytes());
DEBUG_ONLY(zap_segment(next, true);)
}
const bool at_empty_transition = is_empty();
_cur_seg = set_link(next, _cur_seg);
_cur_seg_size = 0;
_full_seg_size += at_empty_transition ? 0 : _seg_size;
DEBUG_ONLY(verify(at_empty_transition);)
}
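
// Illustrative trace: on an empty stack the first push() calls
// push_segment() with at_empty_transition == true, so _full_seg_size stays 0.
// After _seg_size + 1 pushes, a second segment has been linked in, and
// _full_seg_size == _seg_size, _cur_seg_size == 1, size() == _seg_size + 1.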

template <class E>
void Stack<E>::pop_segment()
{
assert(_cur_seg_size == 0, "current segment is not empty");
E* const prev = get_link(_cur_seg);
if (_cache_size < _max_cache_size) {
// Add the current segment to the cache.
DEBUG_ONLY(zap_segment(_cur_seg, false);)
_cache = set_link(_cur_seg, _cache);
++_cache_size;
} else {
DEBUG_ONLY(zap_segment(_cur_seg, true);)
free(_cur_seg, segment_bytes());
}
const bool at_empty_transition = prev == NULL;
_cur_seg = prev;
_cur_seg_size = _seg_size;
_full_seg_size -= at_empty_transition ? 0 : _seg_size;
DEBUG_ONLY(verify(at_empty_transition);)
}

template <class E>
void Stack<E>::free_segments(E* seg)
{
const size_t bytes = segment_bytes();
while (seg != NULL) {
E* const prev = get_link(seg);
free(seg, bytes);
seg = prev;
}
}

template <class E>
void Stack<E>::reset(bool reset_cache)
{
_cur_seg_size = _seg_size; // So push() will alloc a new segment.
_full_seg_size = 0;
_cur_seg = NULL;
if (reset_cache) {
_cache_size = 0;
_cache = NULL;
}
}

#ifdef ASSERT
template <class E>
void Stack<E>::verify(bool at_empty_transition) const
{
assert(size() <= max_size(), "stack exceeded bounds");
assert(cache_size() <= max_cache_size(), "cache exceeded bounds");
assert(_cur_seg_size <= segment_size(), "segment index exceeded bounds");

assert(_full_seg_size % _seg_size == 0, "not a multiple");
assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
assert((_cache == NULL) == (cache_size() == 0), "mismatch");

if (is_empty()) {
assert(_cur_seg_size == segment_size(), "sanity");
}
}

template <class E>
void Stack<E>::zap_segment(E* seg, bool zap_link_field) const
{
if (!ZapStackSegments) return;
const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
uint32_t* cur = (uint32_t*)seg;
const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
while (cur < end) {
*cur++ = 0xfadfaded;
}
}
#endif

template <class E>
E* ResourceStack<E>::alloc(size_t bytes)
{
return (E*) resource_allocate_bytes(bytes);
}

template <class E>
void ResourceStack<E>::free(E* addr, size_t bytes)
{
resource_free_bytes((char*) addr, bytes);
}

template <class E>
void StackIterator<E>::sync()
{
_full_seg_size = _stack._full_seg_size;
_cur_seg_size = _stack._cur_seg_size;
_cur_seg = _stack._cur_seg;
}

template <class E>
E* StackIterator<E>::next_addr()
{
assert(!is_empty(), "no items left");
if (_cur_seg_size == 1) {
E* addr = _cur_seg;
_cur_seg = _stack.get_link(_cur_seg);
_cur_seg_size = _stack.segment_size();
_full_seg_size -= _stack.segment_size();
return addr;
}
return _cur_seg + --_cur_seg_size;
}
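
// For illustration, an iteration sketch (assuming a populated Stack<oop> s):
// the iterator visits elements from the top of the stack downward without
// popping them.
//
//   for (StackIterator<oop> it(s); !it.is_empty(); ) {
//     oop o = it.next();
//     // process o
//   }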
@ -372,75 +372,47 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// Three methods from super classes are overridden:
// This class hides two methods from super classes:
//
// initialize() - initialize the super classes and create the overflow stack
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not overridden--it returns the number of elements in the
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack. This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
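//
// For illustration, the intended calling pattern (assuming the usual worker
// loop; pop_local() is inherited from GenericTaskQueue):
//
//   OverflowTaskQueue<StarTask> q;
//   q.initialize();
//   q.push(t);                       // never fails; spills to the Stack
//   StarTask task;
//   while (q.pop_overflow(task) || q.pop_local(task)) {
//     // process task
//   }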
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
typedef GrowableArray<E> overflow_t;
typedef Stack<E> overflow_t;
typedef GenericTaskQueue<E, N> taskqueue_t;

TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

OverflowTaskQueue();
~OverflowTaskQueue();
void initialize();

inline overflow_t* overflow_stack() const { return _overflow_stack; }

// Push task t onto the queue or onto the overflow stack. Return true.
inline bool push(E t);

// Attempt to pop from the overflow stack; return true if anything was popped.
inline bool pop_overflow(E& t);

inline overflow_t* overflow_stack() { return &_overflow_stack; }

inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
inline bool overflow_empty() const { return overflow_stack()->is_empty(); }
inline bool overflow_empty() const { return _overflow_stack.is_empty(); }
inline bool is_empty() const {
return taskqueue_empty() && overflow_empty();
}

private:
overflow_t* _overflow_stack;
overflow_t _overflow_stack;
};

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::OverflowTaskQueue()
{
_overflow_stack = NULL;
}

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::~OverflowTaskQueue()
{
if (_overflow_stack != NULL) {
delete _overflow_stack;
_overflow_stack = NULL;
}
}

template <class E, unsigned int N>
void OverflowTaskQueue<E, N>::initialize()
{
taskqueue_t::initialize();
assert(_overflow_stack == NULL, "memory leak");
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
if (!taskqueue_t::push(t)) {
overflow_stack()->push(t);
TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->length()));
TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
}
return true;
}