6962589: remove breadth first scanning code from parallel gc
Remove the breadth-first copying order from ParallelScavenge and use depth-first by default. Reviewed-by: jcoomes, ysr, johnc
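For context, the two orders differ only in how newly promoted objects get scanned: the breadth-first path queued whole objects (copy_contents on an oop FIFO, helped by the PrefetchQueue), while the depth-first path pushes reference locations onto a stack (push_contents plus drain_stacks_depth) and is the one the hunks below keep. Depth-first was already the default (UseDepthFirstScavengeOrder defaults to true in globals.hpp below), which is why the flag can simply be obsoleted in arguments.cpp. A toy, self-contained sketch of the two traversal orders over a small object graph; Obj, promote and the traversals are illustrative stand-ins, not HotSpot's oop or task-queue machinery:

#include <cstdio>
#include <queue>
#include <stack>
#include <vector>

// Toy illustration of the two copy orders this change collapses into one:
// breadth-first visits objects via a FIFO of objects, depth-first via a LIFO.
struct Obj {
  int id;
  std::vector<Obj*> fields;
  bool promoted = false;
};

void promote(Obj* o) { o->promoted = true; std::printf("%d ", o->id); }

void scavenge_breadth(Obj* root) {
  std::queue<Obj*> q;
  promote(root); q.push(root);
  while (!q.empty()) {
    Obj* o = q.front(); q.pop();
    for (Obj* f : o->fields)
      if (!f->promoted) { promote(f); q.push(f); }   // copy_contents-style order
  }
}

void scavenge_depth(Obj* root) {
  std::stack<Obj*> s;
  promote(root); s.push(root);
  while (!s.empty()) {
    Obj* o = s.top(); s.pop();
    for (Obj* f : o->fields)
      if (!f->promoted) { promote(f); s.push(f); }   // push_contents-style order
  }
}

int main() {
  Obj a{0}, b{1}, c{2}, d{3}, e{4};
  a.fields = {&b, &c}; b.fields = {&d}; c.fields = {&e};
  std::printf("breadth: "); scavenge_breadth(&a); std::printf("\n");
  for (Obj* o : {&a, &b, &c, &d, &e}) o->promoted = false;  // reset marks
  std::printf("depth:   "); scavenge_depth(&a); std::printf("\n");
}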
This commit is contained in:
parent 3a2b2b4fd8
commit 38ab95c64b
@@ -330,7 +330,6 @@ psPromotionManager.cpp psPromotionManager.inline.hpp
psPromotionManager.cpp psScavenge.inline.hpp
psPromotionManager.hpp allocation.hpp
psPromotionManager.hpp prefetchQueue.hpp
psPromotionManager.hpp psPromotionLAB.hpp
psPromotionManager.hpp taskqueue.hpp
@@ -123,7 +123,6 @@ void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");
  bool depth_first = pm->depth_first();

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
@@ -201,21 +200,12 @@ void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        // hoisted the if (depth_first) check out of the loop
        if (depth_first){
          do {
            oop(bottom_obj)->push_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
          pm->drain_stacks_cond_depth();
        } else {
          do {
            oop(bottom_obj)->copy_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
        }
        do {
          oop(bottom_obj)->push_contents(pm);
          bottom_obj += oop(bottom_obj)->size();
          assert(bottom_obj <= sp_top, "just checking");
        } while (bottom_obj < top);
        pm->drain_stacks_cond_depth();
        // remember top oop* scanned
        prev_top = top;
      }
@@ -230,7 +220,6 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
                                                    uint stripe_number) {
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;
  bool depth_first = pm->depth_first();

  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
@@ -363,43 +352,22 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
@@ -1,68 +0,0 @@
/*
 * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

//
// PrefetchQueue is a FIFO queue of variable length (currently 8).
//
// We need to examine the performance penalty of variable lengths.
// We may also want to split this into cpu dependent bits.
//

const int PREFETCH_QUEUE_SIZE = 8;

class PrefetchQueue : public CHeapObj {
 private:
  void* _prefetch_queue[PREFETCH_QUEUE_SIZE];
  uint  _prefetch_index;

 public:
  int length() { return PREFETCH_QUEUE_SIZE; }

  inline void clear() {
    for(int i=0; i<PREFETCH_QUEUE_SIZE; i++) {
      _prefetch_queue[i] = NULL;
    }
    _prefetch_index = 0;
  }

  template <class T> inline void* push_and_pop(T* p) {
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    Prefetch::write(o->mark_addr(), 0);
    // This prefetch is intended to make sure the size field of array
    // oops is in cache. It assumes the the object layout is
    // mark -> klass -> size, and that mark and klass are heapword
    // sized. If this should change, this prefetch will need updating!
    Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
    _prefetch_queue[_prefetch_index++] = p;
    _prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
    return _prefetch_queue[_prefetch_index];
  }

  // Stores a NULL pointer in the pop'd location.
  inline void* pop() {
    _prefetch_queue[_prefetch_index++] = NULL;
    _prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
    return _prefetch_queue[_prefetch_index];
  }
};
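The deleted push_and_pop above cycles through a fixed ring: it stores the new element, bumps the index, and masks it with PREFETCH_QUEUE_SIZE-1, which only works because the size is a power of two. A standalone sketch of just that wrap-around indexing (an illustration, not HotSpot code; the prefetching itself is omitted):

#include <cstdio>

// Minimal sketch: a fixed-size FIFO that delays items, using the same
// "index & (SIZE-1)" wrap trick as the removed PrefetchQueue.
// SIZE must be a power of two for the mask to act as a modulo.
template <typename T, unsigned SIZE>
class DelayQueue {
  static_assert((SIZE & (SIZE - 1)) == 0, "SIZE must be a power of two");
  T*       _slots[SIZE] = {};
  unsigned _index = 0;
 public:
  // Store the new item, then return the item pushed SIZE-1 pushes earlier
  // (nullptr while the queue is still filling up).
  T* push_and_pop(T* p) {
    _slots[_index++] = p;
    _index &= (SIZE - 1);          // wrap around without a division
    return _slots[_index];
  }
};

int main() {
  DelayQueue<int, 8> q;
  int values[10];
  for (int i = 0; i < 10; ++i) {
    values[i] = i;
    if (int* out = q.push_and_pop(&values[i]))
      std::printf("delayed item: %d\n", *out);   // first appears on the 8th push
  }
}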
@@ -27,7 +27,6 @@

PSPromotionManager** PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
OopTaskQueueSet* PSPromotionManager::_stack_array_breadth = NULL;
PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL;
@@ -42,23 +41,14 @@ void PSPromotionManager::initialize() {
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 );
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  if (UseDepthFirstScavengeOrder) {
    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_depth != NULL, "Count not initialize promotion manager");
  } else {
    _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_breadth != NULL, "Count not initialize promotion manager");
  }
  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    if (UseDepthFirstScavengeOrder) {
      stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
    } else {
      stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
    }
    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
  }

  // The VMThread gets its own PSPromotionManager, which is not available
@@ -93,11 +83,7 @@ void PSPromotionManager::post_scavenge() {
  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    if (UseDepthFirstScavengeOrder) {
      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    } else {
      assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
    }
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    manager->flush_labs();
  }
}
@@ -105,10 +91,8 @@ void PSPromotionManager::post_scavenge() {
#if TASKQUEUE_STATS
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  const TaskQueueStats& stats = depth_first() ?
    _claimed_stack_depth.stats : _claimed_stack_breadth.stats;
  tty->print("%3u ", i);
  stats.print();
  _claimed_stack_depth.stats.print();
  tty->cr();
}
@@ -128,8 +112,7 @@ static const char* const pm_stats_hdr[] = {

void
PSPromotionManager::print_stats() {
  const bool df = UseDepthFirstScavengeOrder;
  tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? "Depth" : "Breadth",
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
@@ -147,9 +130,7 @@ PSPromotionManager::print_stats() {

void
PSPromotionManager::reset_stats() {
  TaskQueueStats& stats = depth_first() ?
    claimed_stack_depth()->stats : claimed_stack_breadth()->stats;
  stats.reset();
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
@@ -158,19 +139,13 @@ PSPromotionManager::reset_stats() {
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _depth_first = UseDepthFirstScavengeOrder;

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  if (depth_first()) {
    claimed_stack_depth()->initialize();
    queue_size = claimed_stack_depth()->max_elems();
  } else {
    claimed_stack_breadth()->initialize();
    queue_size = claimed_stack_breadth()->max_elems();
  }
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
@@ -205,14 +180,11 @@ void PSPromotionManager::reset() {
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _prefetch_queue.clear();

  TASKQUEUE_STATS_ONLY(reset_stats());
}


void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(depth_first(), "invariant");
  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;
@@ -250,50 +222,6 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
  assert(!depth_first(), "invariant");
  assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
  do {
    oop obj;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(obj)) {
      obj->copy_contents(this);
    }

    if (totally_drain) {
      while (tq->pop_local(obj)) {
        obj->copy_contents(this);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
        obj->copy_contents(this);
      }
    }

    // If we could not find any other work, flush the prefetch queue
    if (tq->is_empty()) {
      flush_prefetch_queue();
    }
  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");
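The removed breadth-first drain above follows the same shape as the depth-first drain that survives: empty the private overflow list first so the stealable part of the queue stays populated, then pop local work, either completely or only down to a target size. A toy sketch of that pattern with made-up queue types, not HotSpot's OverflowTaskQueue API:

#include <cstddef>
#include <deque>
#include <vector>

// Toy stand-in for one worker's task queue: a bounded local part that other
// workers could steal from, plus a private overflow list.
struct ToyTaskQueue {
  std::vector<int> local;
  std::deque<int>  overflow;

  bool pop_overflow(int& t) {
    if (overflow.empty()) return false;
    t = overflow.back(); overflow.pop_back(); return true;
  }
  bool pop_local(int& t) {
    if (local.empty()) return false;
    t = local.back(); local.pop_back(); return true;
  }
};

// Stand-in for scanning one object; a real scavenger may push new tasks here.
static void process(int /*task*/) {}

// Drain shape shared by the removed breadth-first drain and the surviving
// depth-first one: overflow first (so stealable work stays visible), then
// local work, either completely or only down to target_size.
void drain(ToyTaskQueue& tq, bool totally_drain, std::size_t target_size) {
  do {
    int task;
    while (tq.pop_overflow(task)) process(task);
    if (totally_drain) {
      while (tq.pop_local(task)) process(task);
    } else {
      while (tq.local.size() > target_size && tq.pop_local(task)) process(task);
    }
  } while ((totally_drain && !tq.local.empty()) || !tq.overflow.empty());
}

int main() {
  ToyTaskQueue tq;
  for (int i = 0; i < 32; ++i) (i % 4 ? tq.local.push_back(i) : tq.overflow.push_back(i));
  drain(tq, /*totally_drain=*/false, /*target_size=*/4);  // leaves up to 4 stealable tasks
  drain(tq, /*totally_drain=*/true, 0);                    // now empty everything
}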
@@ -319,7 +247,7 @@ void PSPromotionManager::flush_labs() {
// performance.
//

oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;
@@ -423,24 +351,20 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
      assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
    }

    if (depth_first) {
      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is->objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    // Do the size comparison first with new_obj_size, which we
    // already have. Hopefully, only a few objects are larger than
    // _min_array_size_for_chunking, and most of them will be arrays.
    // So, the is->objArray() test would be very infrequent.
    if (new_obj_size > _min_array_size_for_chunking &&
        new_obj->is_objArray() &&
        PSChunkLargeArrays) {
      // we'll chunk it
      oop* const masked_o = mask_chunked_array_oop(o);
      push_depth(masked_o);
      TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
    } else {
      push_breadth(new_obj);
      // we'll just push its contents
      new_obj->push_contents(this);
    }
  } else {
    // We lost, someone else "owns" this object
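In the chunking branch that survives, a large objArray is not scanned in one go; instead a "masked" pointer to it is pushed so the array can be processed in chunks later (see process_array_chunk and is_oop_masked in the other hunks). The sketch below shows the general low-bit pointer-tagging idea under the assumption that pointers are at least 4-byte aligned; the constant and helpers are made up and are not HotSpot's actual mask:

#include <cassert>
#include <cstdint>

// Illustrative low-bit tagging: since object pointers are aligned, the lowest
// bit is free to mark "this task is a partially scanned array" rather than an
// ordinary reference location.
constexpr std::uintptr_t kChunkedTag = 0x1;

inline void* mask_chunked(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(p) | kChunkedTag);
}
inline bool is_chunked(void* p) {
  return (reinterpret_cast<std::uintptr_t>(p) & kChunkedTag) != 0;
}
inline void* unmask(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(p) & ~kChunkedTag);
}

int main() {
  static int big_array_header = 0;        // stand-in for a large array object
  void* task = mask_chunked(&big_array_header);
  assert(is_chunked(task));               // a popped task can be told apart...
  assert(unmask(task) == &big_array_header);  // ...and turned back into the object
}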
@@ -537,13 +461,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    if (depth_first()) {
      obj->push_contents(this);
    } else {
      // Don't bother incrementing the age, just push
      // onto the claimed_stack..
      push_breadth(obj);
    }
    obj->push_contents(this);

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
@@ -48,7 +48,6 @@ class PSPromotionManager : public CHeapObj {
 private:
  static PSPromotionManager** _manager_array;
  static OopStarTaskQueueSet* _stack_array_depth;
  static OopTaskQueueSet* _stack_array_breadth;
  static PSOldGen* _old_gen;
  static MutableSpace* _young_space;
@@ -69,12 +68,10 @@ class PSPromotionManager : public CHeapObj {
  PSOldPromotionLAB _old_lab;
  bool _young_gen_is_full;
  bool _old_gen_is_full;
  PrefetchQueue _prefetch_queue;

  OopStarTaskQueue _claimed_stack_depth;
  OverflowTaskQueue<oop> _claimed_stack_breadth;

  bool _depth_first;
  bool _totally_drain;
  uint _target_stack_size;
@@ -87,7 +84,6 @@ class PSPromotionManager : public CHeapObj {

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);
  template <class T> inline void claim_or_forward_internal_breadth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
@@ -136,19 +132,11 @@ class PSPromotionManager : public CHeapObj {
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    assert(depth_first(), "pre-condition");
    claimed_stack_depth()->push(p);
  }

  void push_breadth(oop o) {
    assert(!depth_first(), "pre-condition");
    claimed_stack_breadth()->push(o);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
  static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }

 public:
  // Static
  static void initialize();
@@ -163,19 +151,12 @@ class PSPromotionManager : public CHeapObj {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  static bool steal_breadth(int queue_num, int* seed, oop& t) {
    return stack_array_breadth()->steal(queue_num, seed, t);
  }

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }
  OverflowTaskQueue<oop>* claimed_stack_breadth() {
    return &_claimed_stack_breadth;
  }

  bool young_gen_is_full() { return _young_gen_is_full; }
@@ -183,18 +164,14 @@ class PSPromotionManager : public CHeapObj {
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o, bool depth_first);
  oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    if (depth_first()) {
      drain_stacks_depth(totally_drain);
    } else {
      drain_stacks_breadth(totally_drain);
    }
    drain_stacks_depth(totally_drain);
  }
 public:
  void drain_stacks_cond_depth() {
@@ -203,22 +180,14 @@ class PSPromotionManager : public CHeapObj {
    }
  }
  void drain_stacks_depth(bool totally_drain);
  void drain_stacks_breadth(bool totally_drain);

  bool depth_first() const {
    return _depth_first;
  }
  bool stacks_empty() {
    return depth_first() ?
      claimed_stack_depth()->is_empty() :
      claimed_stack_breadth()->is_empty();
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  inline void flush_prefetch_queue();
  template <class T> inline void claim_or_forward_depth(T* p);
  template <class T> inline void claim_or_forward_breadth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};
@@ -45,33 +45,8 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
    } else {
      o = copy_to_survivor_space(o, false);
    }
    // Card mark
    if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
      PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
    }
    oopDesc::encode_store_heap_oop_not_null(p, o);
  }
}

inline void PSPromotionManager::flush_prefetch_queue() {
  assert(!depth_first(), "invariant");
  for (int i = 0; i < _prefetch_queue.length(); i++) {
    claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(depth_first(), "invariant");
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
@@ -80,36 +55,6 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  claim_or_forward_internal_depth(p);
}

template <class T>
inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
  assert(!depth_first(), "invariant");
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  if (UsePrefetchQueue) {
    claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
  } else {
    // This option is used for testing. The use of the prefetch
    // queue can delay the processing of the objects and thus
    // change the order of object scans. For example, remembered
    // set updates are typically the clearing of the remembered
    // set (the cards) followed by updates of the remembered set
    // for young-to-old pointers. In a situation where there
    // is an error in the sequence of clearing and updating
    // (e.g. clear card A, update card A, erroneously clear
    // card A again) the error can be obscured by a delay
    // in the update due to the use of the prefetch queue
    // (e.g., clear card A, erroneously clear card A again,
    // update card A that was pushed into the prefetch queue
    // and thus delayed until after the erronous clear). The
    // length of the delay is random depending on the objects
    // in the queue and the delay can be zero.
    claim_or_forward_internal_breadth(p);
  }
}

inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
@@ -157,10 +157,8 @@ void PSRefProcTaskExecutor::execute(ProcessTask& task)
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
      (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
      : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
    ParallelScavengeHeap::gc_task_manager()->workers(),
    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
@@ -375,10 +373,8 @@ bool PSScavenge::invoke_no_policy() {
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

    ParallelTaskTerminator terminator(
      gc_task_manager()->workers(),
      promotion_manager->depth_first() ?
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
        : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      gc_task_manager()->workers(),
      (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
    if (ParallelGCThreads>1) {
      for (uint j=0; j<ParallelGCThreads; j++) {
        q->enqueue(new StealTask(&terminator));
@@ -65,7 +65,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
  oop o = oopDesc::load_decode_heap_oop_not_null(p);
  oop new_obj = o->is_forwarded()
      ? o->forwardee()
      : pm->copy_to_survivor_space(o, pm->depth_first());
      : pm->copy_to_survivor_space(o);
  oopDesc::encode_store_heap_oop_not_null(p, new_obj);

  // We cannot mark without test, as some code passes us pointers
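The barrier above is the usual evacuation idiom: if another worker already copied the object, reuse its forwardee; otherwise copy it yourself and let a race decide whose copy wins. A simplified sketch with an explicit atomic forwarding field; HotSpot instead encodes forwarding in the mark word, and the allocation and types here are stand-ins:

#include <atomic>

// Minimal sketch of the "use the forwardee if someone already copied it,
// otherwise copy it yourself" idiom behind copy_and_push_safe_barrier.
struct Object {
  std::atomic<Object*> forwardee{nullptr};
  int payload = 0;
};

static Object g_survivor[1024];              // toy bump-pointer survivor space
static std::atomic<int> g_top{0};
Object* allocate_in_survivor_space() { return &g_survivor[g_top++]; }

Object* copy_to_survivor_space(Object* o) {
  Object* copy = allocate_in_survivor_space();
  copy->payload = o->payload;
  Object* expected = nullptr;
  // Try to install our copy as the forwardee; if another thread won the race,
  // abandon ours and use theirs.
  if (o->forwardee.compare_exchange_strong(expected, copy)) return copy;
  return expected;
}

Object* copy_and_update(Object** slot) {
  Object* o = *slot;
  Object* fwd = o->forwardee.load();
  Object* new_obj = (fwd != nullptr) ? fwd : copy_to_survivor_space(o);
  *slot = new_obj;   // update the reference, like encode_store_heap_oop_not_null
  return new_obj;
}

int main() {
  Object old_obj;
  old_obj.payload = 42;
  Object* ref = &old_obj;                    // a slot holding a young-gen reference
  Object* copy = copy_and_update(&ref);
  return (copy->payload == 42 && ref == copy) ? 0 : 1;
}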
@@ -144,29 +144,15 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
         "stacks should be empty at this point");

  int random_seed = 17;
  if (pm->depth_first()) {
    while(true) {
      StarTask p;
      if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
        TASKQUEUE_STATS_ONLY(pm->record_steal(p));
        pm->process_popped_location_depth(p);
        pm->drain_stacks_depth(true);
      } else {
        if (terminator()->offer_termination()) {
          break;
        }
      }
    }
  } else {
    while(true) {
      oop obj;
      if (PSPromotionManager::steal_breadth(which, &random_seed, obj)) {
        obj->copy_contents(pm);
        pm->drain_stacks_breadth(true);
      } else {
        if (terminator()->offer_termination()) {
          break;
        }
  while(true) {
    StarTask p;
    if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
      TASKQUEUE_STATS_ONLY(pm->record_steal(p));
      pm->process_popped_location_depth(p);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
    }
  }
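What remains of StealTask::do_it is the standard steal-or-terminate loop: steal a task from another worker, process it, drain your own queue, and otherwise offer termination until no worker has work left. A toy multi-threaded sketch of that flow; the mutex-protected deques and the pending-task counter are simplifications, not HotSpot's lock-free task queues or ParallelTaskTerminator:

#include <atomic>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

constexpr int kWorkers = 4;

struct WorkerQueue {
  std::deque<int> tasks;
  std::mutex lock;
};

WorkerQueue queues[kWorkers];
std::atomic<int> pending{0};   // tasks not yet processed anywhere

bool pop_local(int id, int& task) {
  std::lock_guard<std::mutex> g(queues[id].lock);
  if (queues[id].tasks.empty()) return false;
  task = queues[id].tasks.back();
  queues[id].tasks.pop_back();
  return true;
}

bool steal(int id, int& task) {
  for (int v = 0; v < kWorkers; ++v) {
    if (v == id) continue;
    std::lock_guard<std::mutex> g(queues[v].lock);
    if (!queues[v].tasks.empty()) {
      task = queues[v].tasks.front();   // steal from the cold end of the victim
      queues[v].tasks.pop_front();
      return true;
    }
  }
  return false;
}

void worker(int id) {
  int task, done = 0;
  while (pending.load() > 0) {          // crude termination: no unfinished tasks left
    if (pop_local(id, task) || steal(id, task)) {
      ++done;                           // "process" the task; a scavenger would push new work here
      pending.fetch_sub(1);
    } else {
      std::this_thread::yield();        // idle; other workers may still publish work
    }
  }
  std::printf("worker %d processed %d tasks\n", id, done);
}

int main() {
  for (int i = 0; i < 100; ++i) queues[0].tasks.push_back(i);  // seed all work on worker 0
  pending = 100;
  std::thread threads[kWorkers];
  for (int i = 0; i < kWorkers; ++i) threads[i] = std::thread(worker, i);
  for (auto& t : threads) t.join();
}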
@@ -108,10 +108,6 @@ int arrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
}

#ifndef SERIALGC
void arrayKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->blueprint()->oop_is_arrayKlass(),"must be an array klass");
}

void arrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->blueprint()->oop_is_arrayKlass(),"must be an array klass");
}
@@ -120,10 +120,6 @@ int compiledICHolderKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void compiledICHolderKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_compiledICHolder(), "must be compiledICHolder");
}

void compiledICHolderKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_compiledICHolder(), "must be compiledICHolder");
}
@@ -157,10 +157,6 @@ int constMethodKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void constMethodKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_constMethod(), "should be constMethod");
}

void constMethodKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_constMethod(), "should be constMethod");
}
@@ -268,21 +268,6 @@ constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
  return cp->object_size();
}

void constantPoolKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_constantPool(), "should be constant pool");
  constantPoolOop cp = (constantPoolOop) obj;
  if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) {
    oop* base = (oop*)cp->base();
    for (int i = 0; i < cp->length(); ++i, ++base) {
      if (cp->tag_at(i).is_string()) {
        if (PSScavenge::should_scavenge(base)) {
          pm->claim_or_forward_breadth(base);
        }
      }
    }
  }
}

void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_constantPool(), "should be constant pool");
  constantPoolOop cp = (constantPoolOop) obj;
@@ -166,29 +166,6 @@ bool constantPoolCacheKlass::oop_is_conc_safe(oop obj) const {
}

#ifndef SERIALGC
void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm,
                                               oop obj) {
  assert(obj->is_constantPoolCache(), "should be constant pool");
  if (EnableInvokeDynamic) {
    constantPoolCacheOop cache = (constantPoolCacheOop)obj;
    // during a scavenge, it is safe to inspect my pool, since it is perm
    constantPoolOop pool = cache->constant_pool();
    assert(pool->is_constantPool(), "should be constant pool");
    if (pool->has_invokedynamic()) {
      for (int i = 0; i < cache->length(); i++) {
        ConstantPoolCacheEntry* e = cache->entry_at(i);
        oop* p = (oop*)&e->_f1;
        if (e->is_secondary_entry()) {
          if (PSScavenge::should_scavenge(p))
            pm->claim_or_forward_breadth(p);
          assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)),
                 "no live oops here");
        }
      }
    }
  }
}

void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm,
                                               oop obj) {
  assert(obj->is_constantPoolCache(), "should be constant pool");
@@ -1809,18 +1809,7 @@ int instanceKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_breadth(p); \
    }, \
    assert_nothing )
}

void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
@@ -1846,18 +1835,7 @@ int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
  return size_helper();
}

void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
  assert(!pm->depth_first(), "invariant");
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_breadth(p); \
    }, \
    assert_nothing )
}

void instanceKlass::push_static_fields(PSPromotionManager* pm) {
  assert(pm->depth_first(), "invariant");
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    if (PSScavenge::should_scavenge(p)) { \
@@ -711,7 +711,6 @@ class instanceKlass: public Klass {

#ifndef SERIALGC
  // Parallel Scavenge
  void copy_static_fields(PSPromotionManager* pm);
  void push_static_fields(PSPromotionManager* pm);

  // Parallel Old
@@ -292,41 +292,7 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void instanceKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  instanceKlass* ik = instanceKlass::cast(klassOop(obj));
  ik->copy_static_fields(pm);

  oop* loader_addr = ik->adr_class_loader();
  if (PSScavenge::should_scavenge(loader_addr)) {
    pm->claim_or_forward_breadth(loader_addr);
  }

  oop* pd_addr = ik->adr_protection_domain();
  if (PSScavenge::should_scavenge(pd_addr)) {
    pm->claim_or_forward_breadth(pd_addr);
  }

  oop* hk_addr = ik->adr_host_klass();
  if (PSScavenge::should_scavenge(hk_addr)) {
    pm->claim_or_forward_breadth(hk_addr);
  }

  oop* sg_addr = ik->adr_signers();
  if (PSScavenge::should_scavenge(sg_addr)) {
    pm->claim_or_forward_breadth(sg_addr);
  }

  oop* bsm_addr = ik->adr_bootstrap_method();
  if (PSScavenge::should_scavenge(bsm_addr)) {
    pm->claim_or_forward_breadth(bsm_addr);
  }

  klassKlass::oop_copy_contents(pm, obj);
}

void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  instanceKlass* ik = instanceKlass::cast(klassOop(obj));
  ik->push_static_fields(pm);
@@ -355,7 +321,7 @@ void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
    pm->claim_or_forward_depth(bsm_addr);
  }

  klassKlass::oop_copy_contents(pm, obj);
  klassKlass::oop_push_contents(pm, obj);
}

int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
@@ -272,42 +272,9 @@ ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)

#ifndef SERIALGC
template <class T>
void specialized_oop_copy_contents(instanceRefKlass *ref,
                                   PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, ref->reference_type())) {
      // reference already enqueued, referent and next will be traversed later
      ref->instanceKlass::oop_copy_contents(pm, obj);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_breadth(referent_addr);
    }
  }
  // treat next as normal oop
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_breadth(next_addr);
  }
  ref->instanceKlass::oop_copy_contents(pm, obj);
}

void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  if (UseCompressedOops) {
    specialized_oop_copy_contents<narrowOop>(this, pm, obj);
  } else {
    specialized_oop_copy_contents<oop>(this, pm, obj);
  }
}

template <class T>
void specialized_oop_push_contents(instanceRefKlass *ref,
                                   PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
@@ -161,9 +161,6 @@ int klassKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void klassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
}

void klassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
}

@@ -28,7 +28,6 @@

#ifndef SERIALGC
#define PARALLEL_GC_DECLS \
  virtual void oop_copy_contents(PSPromotionManager* pm, oop obj); \
  virtual void oop_push_contents(PSPromotionManager* pm, oop obj); \
  /* Parallel Old GC support \
  \
@@ -43,7 +42,6 @@

// Pure virtual version for klass.hpp
#define PARALLEL_GC_DECLS_PV \
  virtual void oop_copy_contents(PSPromotionManager* pm, oop obj) = 0; \
  virtual void oop_push_contents(PSPromotionManager* pm, oop obj) = 0; \
  virtual void oop_follow_contents(ParCompactionManager* cm, oop obj) = 0; \
  virtual int oop_update_pointers(ParCompactionManager* cm, oop obj) = 0; \
@@ -154,13 +154,6 @@ int methodDataKlass::oop_adjust_pointers(oop obj) {


#ifndef SERIALGC
void methodDataKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert (obj->is_methodData(), "object must be method data");
  methodDataOop m = methodDataOop(obj);
  // This should never point into the young gen.
  assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
}

void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert (obj->is_methodData(), "object must be method data");
  methodDataOop m = methodDataOop(obj);
@@ -184,10 +184,6 @@ int methodKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void methodKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_method(), "should be method");
}

void methodKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_method(), "should be method");
}
@@ -426,18 +426,7 @@ int objArrayKlass::oop_adjust_pointers(oop obj) {
}

#ifndef SERIALGC
void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(!pm->depth_first(), "invariant");
  assert(obj->is_objArray(), "obj must be obj array");
  ObjArrayKlass_OOP_ITERATE( \
    objArrayOop(obj), p, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_breadth(p); \
    })
}

void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(pm->depth_first(), "invariant");
  assert(obj->is_objArray(), "obj must be obj array");
  ObjArrayKlass_OOP_ITERATE( \
    objArrayOop(obj), p, \
@@ -229,10 +229,6 @@ objArrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
}

#ifndef SERIALGC
void objArrayKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->blueprint()->oop_is_objArrayKlass(),"must be an obj array klass");
}

void objArrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->blueprint()->oop_is_objArrayKlass(),"must be an obj array klass");
}
@@ -306,7 +306,6 @@ class oopDesc {

#ifndef SERIALGC
  // Parallel Scavenge
  void copy_contents(PSPromotionManager* pm);
  void push_contents(PSPromotionManager* pm);

  // Parallel Old
@@ -24,15 +24,6 @@

// ParallelScavengeHeap methods

inline void oopDesc::copy_contents(PSPromotionManager* pm) {
  Klass* klass = blueprint();
  if (!klass->oop_is_typeArray()) {
    // It might contain oops beyond the header, so take the virtual call.
    klass->oop_copy_contents(pm, this);
  }
  // Else skip it. The typeArrayKlass in the header never needs scavenging.
}

inline void oopDesc::push_contents(PSPromotionManager* pm) {
  Klass* klass = blueprint();
  if (!klass->oop_is_typeArray()) {
@@ -184,10 +184,6 @@ int symbolKlass::oop_adjust_pointers(oop obj) {


#ifndef SERIALGC
void symbolKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_symbol(), "should be symbol");
}

void symbolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_symbol(), "should be symbol");
}
@@ -228,10 +228,6 @@ int typeArrayKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
}

#ifndef SERIALGC
void typeArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_typeArray(),"must be a type array");
}

void typeArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->is_typeArray(),"must be a type array");
}
@@ -184,6 +184,8 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
  { "DefaultMaxRAM", JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
  { "DefaultInitialRAMFraction",
    JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
  { "UseDepthFirstScavengeOrder",
    JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
  { NULL, JDK_Version(0), JDK_Version(0) }
};
@@ -3088,10 +3088,6 @@ class CommandLineFlags {
  \
  product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \
  \
  product(bool, UseDepthFirstScavengeOrder, true, \
          "true: the scavenge order will be depth-first, " \
          "false: the scavenge order will be breadth-first") \
  \
  product(bool, PSChunkLargeArrays, true, \
          "true: process large arrays in chunks") \
  \