Merge
commit d327b24c06
@@ -1441,6 +1441,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
 }
 
 jint G1CollectedHeap::initialize() {
+  CollectedHeap::pre_initialize();
   os::enable_vtime();
 
   // Necessary to satisfy locking discipline assertions.
@@ -1007,6 +1007,10 @@ public:
     return true;
   }
 
+  virtual bool card_mark_must_follow_store() const {
+    return true;
+  }
+
   bool is_in_young(oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
    return hr != NULL && hr->is_young();
@@ -73,7 +73,12 @@ void PtrQueue::enqueue_known_active(void* ptr) {
 
 void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
   assert(_lock->owned_by_self(), "Required.");
+
+  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
+  // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
+  // have the same rank and we may get the "possible deadlock" message
   _lock->unlock();
+
   qset()->enqueue_complete_buffer(buf);
   // We must relock only because the caller will unlock, for the normal
   // case.
@@ -140,7 +145,36 @@ void PtrQueue::handle_zero_index() {
   // holding the lock if there is one).
   if (_buf != NULL) {
     if (_lock) {
-      locking_enqueue_completed_buffer(_buf);
+      assert(_lock->owned_by_self(), "Required.");
+
+      // The current PtrQ may be the shared dirty card queue and
+      // may be being manipulated by more than one worker thread
+      // during a pause. Since the enqueuing of the completed
+      // buffer unlocks the Shared_DirtyCardQ_lock more than one
+      // worker thread can 'race' on reading the shared queue attributes
+      // (_buf and _index) and multiple threads can call into this
+      // routine for the same buffer. This will cause the completed
+      // buffer to be added to the CBL multiple times.
+
+      // We "claim" the current buffer by caching value of _buf in
+      // a local and clearing the field while holding _lock. When
+      // _lock is released (while enqueueing the completed buffer)
+      // the thread that acquires _lock will skip this code,
+      // preventing the subsequent the multiple enqueue, and
+      // install a newly allocated buffer below.
+
+      void** buf = _buf;   // local pointer to completed buffer
+      _buf = NULL;         // clear shared _buf field
+
+      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer
+
+      // While the current thread was enqueuing the buffer another thread
+      // may have a allocated a new buffer and inserted it into this pointer
+      // queue. If that happens then we just return so that the current
+      // thread doesn't overwrite the buffer allocated by the other thread
+      // and potentially losing some dirtied cards.
+
+      if (_buf != NULL) return;
     } else {
       if (qset()->process_or_enqueue_complete_buffer(_buf)) {
         // Recycle the buffer. No allocation.
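
The comments added in the hunk above describe a claim-then-publish pattern: a worker caches the shared `_buf` pointer and clears the field while still holding `_lock`, so any thread that re-acquires the lock during the (temporarily unlocked) enqueue skips the same buffer. A minimal standalone sketch of that idea follows; `SharedQueue`, `Buffer`, `enqueue_completed()` and the two mutexes are hypothetical stand-ins for the HotSpot types, not the real API.

```cpp
#include <mutex>
#include <vector>

struct Buffer { /* payload elided */ };

class SharedQueue {
  std::mutex           lock_;          // plays the role of _lock (Shared_DirtyCardQ_lock)
  Buffer*              buf_ = nullptr; // plays the role of _buf
  std::mutex           cbl_lock_;      // plays the role of DirtyCardQ_CBL_mon
  std::vector<Buffer*> completed_;     // plays the role of the completed-buffer list

  // Caller holds lock_; drop it before taking cbl_lock_ (lock ranking),
  // then re-acquire it, mirroring locking_enqueue_completed_buffer().
  void enqueue_completed(Buffer* b) {
    lock_.unlock();
    {
      std::lock_guard<std::mutex> g(cbl_lock_);
      completed_.push_back(b);
    }
    lock_.lock();
  }

 public:
  void handle_full_buffer() {
    std::lock_guard<std::mutex> g(lock_);
    if (buf_ == nullptr) return;   // another thread already claimed the buffer
    Buffer* claimed = buf_;        // "claim": cache the shared field ...
    buf_ = nullptr;                // ... and clear it while still locked
    enqueue_completed(claimed);    // lock_ is dropped and re-taken inside
    if (buf_ != nullptr) return;   // someone installed a fresh buffer meanwhile
    buf_ = new Buffer();           // otherwise install a new one ourselves
  }
};
```
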
@@ -1,41 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-void PtrQueue::handle_zero_index() {
-  assert(0 == _index, "Precondition.");
-  // This thread records the full buffer and allocates a new one (while
-  // holding the lock if there is one).
-  void** buf = _buf;
-  _buf = qset()->allocate_buffer();
-  _sz = qset()->buffer_size();
-  _index = _sz;
-  assert(0 <= _index && _index <= _sz, "Invariant.");
-  if (buf != NULL) {
-    if (_lock) {
-      locking_enqueue_completed_buffer(buf);
-    } else {
-      qset()->enqueue_complete_buffer(buf);
-    }
-  }
-}
@@ -51,6 +51,8 @@ static void trace_gen_sizes(const char* const str,
 }
 
 jint ParallelScavengeHeap::initialize() {
+  CollectedHeap::pre_initialize();
+
   // Cannot be initialized until after the flags are parsed
   GenerationSizer flag_parser;
 
@@ -717,10 +719,6 @@ HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
   return young_gen()->allocate(size, true);
 }
 
-void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
-  CollectedHeap::fill_all_tlabs(retire);
-}
-
 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
   CollectedHeap::accumulate_statistics_all_tlabs();
 }
@@ -54,7 +54,6 @@ class ParallelScavengeHeap : public CollectedHeap {
  protected:
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
-  void fill_all_tlabs(bool retire);
 
  public:
   ParallelScavengeHeap() : CollectedHeap() {
@@ -191,6 +190,10 @@ class ParallelScavengeHeap : public CollectedHeap {
     return true;
   }
 
+  virtual bool card_mark_must_follow_store() const {
+    return false;
+  }
+
   // Return true if we don't we need a store barrier for
   // initializing stores to an object at this address.
   virtual bool can_elide_initializing_store_barrier(oop new_obj);
@@ -59,8 +59,18 @@ CollectedHeap::CollectedHeap()
     PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                                80, GCCause::to_string(_gc_lastcause), CHECK);
   }
+  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
 }
 
+void CollectedHeap::pre_initialize() {
+  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
+  // otherwise remains unused.
+#ifdef COMPLER2
+  _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+  assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
 
 #ifndef PRODUCT
 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
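
The new `pre_initialize()` runs after the concrete heap has been constructed (so the virtual `card_mark_must_follow_store()` call resolves correctly) and decides once whether card marks arising from compiled-code allocations must be deferred. A sketch of just that decision, with the three booleans passed in as parameters rather than read from the real VM globals:

```cpp
// Sketch only: the parameters stand in for ReduceInitialCardMarks,
// DeferInitialCardMark and the heap's card_mark_must_follow_store() answer.
bool should_defer_initial_card_mark(bool reduce_initial_card_marks,
                                    bool defer_initial_card_mark_flag,
                                    bool card_mark_must_follow_store) {
  // Deferral only matters when the compiler elides initial card marks; it is
  // then forced either explicitly (diagnostic flag) or because the heap needs
  // the card mark to strictly follow the initializing stores.
  return reduce_initial_card_marks &&
         (defer_initial_card_mark_flag || card_mark_must_follow_store);
}
```
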
@@ -140,12 +150,13 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
   MemRegion deferred = thread->deferred_card_mark();
   if (!deferred.is_empty()) {
+    assert(_defer_initial_card_mark, "Otherwise should be empty");
     {
       // Verify that the storage points to a parsable object in heap
       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
       assert(is_in(old_obj), "Not in allocated heap");
       assert(!can_elide_initializing_store_barrier(old_obj),
-             "Else should have been filtered in defer_store_barrier()");
+             "Else should have been filtered in new_store_pre_barrier()");
       assert(!is_in_permanent(old_obj), "Sanity: not expected");
       assert(old_obj->is_oop(true), "Not an oop");
       assert(old_obj->is_parsable(), "Will not be concurrently parsable");
@@ -174,9 +185,7 @@ void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
   // so long as the card-mark is completed before the next
   // scavenge. For all these cases, we can do a card mark
   // at the point at which we do a slow path allocation
-  // in the old gen. For uniformity, however, we end
-  // up using the same scheme (see below) for all three
-  // cases (deferring the card-mark appropriately).
+  // in the old gen, i.e. in this call.
   // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
   // in addition that the card-mark for an old gen allocated
   // object strictly follow any associated initializing stores.
@@ -199,12 +208,13 @@ void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
   // but, like in CMS, because of the presence of concurrent refinement
   // (much like CMS' precleaning), must strictly follow the oop-store.
   // Thus, using the same protocol for maintaining the intended
-  // invariants turns out, serendepitously, to be the same for all
-  // three collectors/heap types above.
+  // invariants turns out, serendepitously, to be the same for both
+  // G1 and CMS.
   //
-  // For each future collector, this should be reexamined with
-  // that specific collector in mind.
-oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
+  // For any future collector, this code should be reexamined with
+  // that specific collector in mind, and the documentation above suitably
+  // extended and updated.
+oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
   // If a previous card-mark was deferred, flush it now.
   flush_deferred_store_barrier(thread);
   if (can_elide_initializing_store_barrier(new_obj)) {
@@ -212,10 +222,17 @@ oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
   } else {
-    // Remember info for the newly deferred store barrier
-    MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
-    assert(!deferred.is_empty(), "Error");
-    thread->set_deferred_card_mark(deferred);
+    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    assert(!mr.is_empty(), "Error");
+    if (_defer_initial_card_mark) {
+      // Defer the card mark
+      thread->set_deferred_card_mark(mr);
+    } else {
+      // Do the card mark
+      BarrierSet* bs = barrier_set();
+      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
+      bs->write_region(mr);
+    }
   }
   return new_obj;
 }
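
The hunk above is the core of the renamed `new_store_pre_barrier()`: when the card mark cannot be elided, the region covering the new object is either remembered on the thread (to be flushed at the next slow-path allocation or GC-related safepoint) or dirtied immediately. A compact sketch of that decision, using hypothetical `Region`, `ThreadState` and `CardTable` stand-ins rather than the real MemRegion/JavaThread/BarrierSet types:

```cpp
#include <cstddef>

struct Region      { void*  start; size_t size_in_words; };
struct ThreadState { Region deferred{nullptr, 0}; };
struct CardTable   { void dirty(const Region&) { /* write_region() stand-in */ } };

void* record_new_store_pre_barrier(ThreadState& t, CardTable& ct,
                                   void* new_obj, size_t size_in_words,
                                   bool defer_initial_card_mark) {
  Region mr{new_obj, size_in_words};
  if (defer_initial_card_mark) {
    t.deferred = mr;   // remembered; flushed before GC scans the card table
  } else {
    ct.dirty(mr);      // do the card mark right away
  }
  return new_obj;      // the heap may in general hand back a safer copy
}
```
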
@@ -241,9 +258,9 @@ void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
   assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
 }
 
-void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
 {
-  if (ZapFillerObjects) {
+  if (ZapFillerObjects && zap) {
     Copy::fill_to_words(start + filler_array_hdr_size(),
                         words - filler_array_hdr_size(), 0XDEAFBABE);
   }
@@ -251,7 +268,7 @@ void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
 #endif // ASSERT
 
 void
-CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 {
   assert(words >= filler_array_min_size(), "too small for an array");
   assert(words <= filler_array_max_size(), "too big for a single object");
@@ -262,16 +279,16 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words)
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
   post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
-  DEBUG_ONLY(zap_filler_array(start, words);)
+  DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }
 
 void
-CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
 {
   assert(words <= filler_array_max_size(), "too big for a single object");
 
   if (words >= filler_array_min_size()) {
-    fill_with_array(start, words);
+    fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
     post_allocation_setup_common(SystemDictionary::Object_klass(), start,
@@ -279,14 +296,14 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
   }
 }
 
-void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 {
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm; // Free handles before leaving.
-  fill_with_object_impl(start, words);
+  fill_with_object_impl(start, words, zap);
 }
 
-void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
 {
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm; // Free handles before leaving.
@@ -299,13 +316,13 @@ void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
   const size_t max = filler_array_max_size();
   while (words > max) {
     const size_t cur = words - max >= min ? max : max - min;
-    fill_with_array(start, cur);
+    fill_with_array(start, cur, zap);
     start += cur;
     words -= cur;
   }
 #endif
 
-  fill_with_object_impl(start, words);
+  fill_with_object_impl(start, words, zap);
 }
 
 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
@@ -313,22 +330,6 @@ HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
   return NULL;
 }
 
-void CollectedHeap::fill_all_tlabs(bool retire) {
-  assert(UseTLAB, "should not reach here");
-  // See note in ensure_parsability() below.
-  assert(SafepointSynchronize::is_at_safepoint() ||
-         !is_init_completed(),
-         "should only fill tlabs at safepoint");
-  // The main thread starts allocating via a TLAB even before it
-  // has added itself to the threads list at vm boot-up.
-  assert(Threads::first() != NULL,
-         "Attempt to fill tlabs before main thread has been added"
-         " to threads list is doomed to failure!");
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-    thread->tlab().make_parsable(retire);
-  }
-}
-
 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
   // The second disjunct in the assertion below makes a concession
   // for the start-up verification done while the VM is being
@@ -343,8 +344,24 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
          "Should only be called at a safepoint or at start-up"
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
-  if (UseTLAB) {
-    fill_all_tlabs(retire_tlabs);
+  const bool use_tlab = UseTLAB;
+  const bool deferred = _defer_initial_card_mark;
+  // The main thread starts allocating via a TLAB even before it
+  // has added itself to the threads list at vm boot-up.
+  assert(!use_tlab || Threads::first() != NULL,
+         "Attempt to fill tlabs before main thread has been added"
+         " to threads list is doomed to failure!");
+  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+#ifdef COMPILER2
+    // The deferred store barriers must all have been flushed to the
+    // card-table (or other remembered set structure) before GC starts
+    // processing the card-table (or other remembered set).
+    if (deferred) flush_deferred_store_barrier(thread);
+#else
+    assert(!deferred, "Should be false");
+    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
   }
 }
 
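
The old `fill_all_tlabs()` walked the thread list only to retire TLABs; the hunk above folds that walk into `ensure_parsability()` and, in the same pass, flushes any deferred card mark per thread before GC scans the card table. A sketch of the per-thread loop, with hypothetical `JThread` and method names standing in for JavaThread/make_parsable/flush_deferred_store_barrier:

```cpp
struct JThread {
  JThread* next = nullptr;
  void retire_tlab(bool /*retire*/) { /* make_parsable() stand-in */ }
  void flush_deferred_card_mark()   { /* flush_deferred_store_barrier() stand-in */ }
};

void ensure_parsability_sketch(JThread* first, bool use_tlab, bool deferred,
                               bool retire_tlabs) {
  for (JThread* t = first; t != nullptr; t = t->next) {
    if (use_tlab) t->retire_tlab(retire_tlabs);
    // Deferred card marks must reach the card table before GC processes it.
    if (deferred) t->flush_deferred_card_mark();
  }
}
```
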
@@ -51,6 +51,9 @@ class CollectedHeap : public CHeapObj {
   // Used for filler objects (static, but initialized in ctor).
   static size_t _filler_array_max_size;
 
+  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
+  bool _defer_initial_card_mark;
+
  protected:
   MemRegion _reserved;
   BarrierSet* _barrier_set;
@@ -70,13 +73,16 @@ class CollectedHeap : public CHeapObj {
   // Constructor
   CollectedHeap();
 
+  // Do common initializations that must follow instance construction,
+  // for example, those needing virtual calls.
+  // This code could perhaps be moved into initialize() but would
+  // be slightly more awkward because we want the latter to be a
+  // pure virtual.
+  void pre_initialize();
+
   // Create a new tlab
   virtual HeapWord* allocate_new_tlab(size_t size);
 
-  // Fix up tlabs to make the heap well-formed again,
-  // optionally retiring the tlabs.
-  virtual void fill_all_tlabs(bool retire);
-
   // Accumulate statistics on all tlabs.
   virtual void accumulate_statistics_all_tlabs();
 
@@ -127,14 +133,14 @@ class CollectedHeap : public CHeapObj {
   static inline size_t filler_array_max_size();
 
   DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
-  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
 
   // Fill with a single array; caller must ensure filler_array_min_size() <=
   // words <= filler_array_max_size().
-  static inline void fill_with_array(HeapWord* start, size_t words);
+  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
 
   // Fill with a single object (either an int array or a java.lang.Object).
-  static inline void fill_with_object_impl(HeapWord* start, size_t words);
+  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
@@ -338,14 +344,14 @@ class CollectedHeap : public CHeapObj {
     return size_t(align_object_size(oopDesc::header_size()));
   }
 
-  static void fill_with_objects(HeapWord* start, size_t words);
+  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 
-  static void fill_with_object(HeapWord* start, size_t words);
-  static void fill_with_object(MemRegion region) {
-    fill_with_object(region.start(), region.word_size());
+  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
+  static void fill_with_object(MemRegion region, bool zap = true) {
+    fill_with_object(region.start(), region.word_size(), zap);
   }
-  static void fill_with_object(HeapWord* start, HeapWord* end) {
-    fill_with_object(start, pointer_delta(end, start));
+  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
+    fill_with_object(start, pointer_delta(end, start), zap);
   }
 
   // Some heaps may offer a contiguous region for shared non-blocking
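
The filler-object hunks above thread a defaulted `bool zap = true` parameter through every entry point, so existing callers keep the old debug-mangling behaviour while a caller such as TLAB retirement can pass `false` and skip it. A tiny sketch of that defaulted-forwarding pattern; the `fill`/`fill_impl` free functions are hypothetical, not the CollectedHeap interface:

```cpp
#include <cstddef>
#include <cstring>

inline void fill_impl(char* start, size_t bytes, bool zap) {
  if (zap) std::memset(start, 0xBA, bytes);  // debug mangling, skipped when zap == false
  // ... lay down the filler object header here ...
}

inline void fill(char* start, size_t bytes, bool zap = true) {
  fill_impl(start, bytes, zap);              // default keeps old call sites unchanged
}

inline void fill(char* start, char* end, bool zap = true) {
  fill(start, static_cast<size_t>(end - start), zap);  // overload just forwards zap
}
```
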
@@ -431,14 +437,25 @@ class CollectedHeap : public CHeapObj {
   // promises to call this function on such a slow-path-allocated
   // object before performing initializations that have elided
   // store barriers. Returns new_obj, or maybe a safer copy thereof.
-  virtual oop defer_store_barrier(JavaThread* thread, oop new_obj);
+  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
 
   // Answers whether an initializing store to a new object currently
-  // allocated at the given address doesn't need a (deferred) store
+  // allocated at the given address doesn't need a store
   // barrier. Returns "true" if it doesn't need an initializing
   // store barrier; answers "false" if it does.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
 
+  // If a compiler is eliding store barriers for TLAB-allocated objects,
+  // we will be informed of a slow-path allocation by a call
+  // to new_store_pre_barrier() above. Such a call precedes the
+  // initialization of the object itself, and no post-store-barriers will
+  // be issued. Some heap types require that the barrier strictly follows
+  // the initializing stores. (This is currently implemented by deferring the
+  // barrier until the next slow-path allocation or gc-related safepoint.)
+  // This interface answers whether a particular heap type needs the card
+  // mark to be thus strictly sequenced after the stores.
+  virtual bool card_mark_must_follow_store() const = 0;
+
   // If the CollectedHeap was asked to defer a store barrier above,
   // this informs it to flush such a deferred store barrier to the
   // remembered set.
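
The new pure virtual is answered differently by each heap in this diff: G1 returns true, ParallelScavenge returns false, and GenCollectedHeap returns the value of a runtime flag. A compact sketch of that contract shape, with hypothetical policy class names rather than the real heap hierarchy:

```cpp
struct HeapPolicy {
  virtual ~HeapPolicy() = default;
  // True if the card mark must strictly follow the initializing stores.
  virtual bool card_mark_must_follow_store() const = 0;
};

struct ConcurrentRefinementPolicy : HeapPolicy {        // G1-like answer
  bool card_mark_must_follow_store() const override { return true; }
};

struct StopWorldPolicy : HeapPolicy {                   // ParallelScavenge-like answer
  bool card_mark_must_follow_store() const override { return false; }
};

struct ConditionalPolicy : HeapPolicy {                 // GenCollectedHeap-like answer
  bool concurrent_old_gen;
  explicit ConditionalPolicy(bool c) : concurrent_old_gen(c) {}
  bool card_mark_must_follow_store() const override { return concurrent_old_gen; }
};
```
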
@@ -51,6 +51,8 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
 }
 
 jint GenCollectedHeap::initialize() {
+  CollectedHeap::pre_initialize();
+
   int i;
   _n_gens = gen_policy()->number_of_generations();
 
@@ -129,6 +131,7 @@ jint GenCollectedHeap::initialize() {
 
   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
   set_barrier_set(rem_set()->bs());
+
   _gch = this;
 
   for (i = 0; i < _n_gens; i++) {
@@ -260,6 +260,10 @@ public:
     return true;
   }
 
+  virtual bool card_mark_must_follow_store() const {
+    return UseConcMarkSweepGC;
+  }
+
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
   // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
@@ -100,7 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
-    CollectedHeap::fill_with_object(top(), hard_end());
+    CollectedHeap::fill_with_object(top(), hard_end(), retire);
 
     if (retire || ZeroTLAB) { // "Reset" the TLAB
       set_start(NULL);
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,13 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
   HeapWord* obj = top();
   if (pointer_delta(end(), obj) >= size) {
     // successful thread-local allocation
-    DEBUG_ONLY(Copy::fill_to_words(obj, size, badHeapWordVal));
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = CollectedHeap::min_fill_size();
+    Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
+#endif // ASSERT
     // This addition is safe because we know that top is
     // at least size below end, so the add can't wrap.
     set_top(obj + size);
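
The TLAB hunk above changes the debug-build mangling so the words where the object header will live are left untouched; otherwise a concurrent GC thread walking the heap could mistake the mangled chunk for a parsable object. A standalone sketch of that header-skipping mangle (hypothetical function and parameter names):

```cpp
#include <cstddef>
#include <cstdint>

// Mangle a freshly allocated chunk in debug builds, but leave the first
// header_words words alone so the region never looks like a parsable object.
void mangle_skipping_header(uintptr_t* obj, size_t size_in_words,
                            size_t header_words, uintptr_t bad_word) {
  for (size_t i = header_words; i < size_in_words; ++i) {
    obj[i] = bad_word;
  }
}
```
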
@@ -3259,9 +3259,10 @@ void GraphKit::write_barrier_post(Node* oop_store,
   if (use_ReduceInitialCardMarks()
       && obj == just_allocated_object(control())) {
     // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with maybe_defer_card_mark() in runtime.cpp.
-    // That routine informs GC to take appropriate compensating steps
-    // so as to make this card-mark elision safe.
+    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+    // That routine informs GC to take appropriate compensating steps,
+    // upon a slow-path allocation, so as to make this card-mark
+    // elision safe.
     return;
   }
 
@@ -143,7 +143,7 @@ const char* OptoRuntime::stub_name(address entry) {
 // We failed the fast-path allocation. Now we need to do a scavenge or GC
 // and try allocation again.
 
-void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
+void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
   // After any safepoint, just before going back to compiled code,
   // we inform the GC that we will be doing initializing writes to
   // this object in the future without emitting card-marks, so
@@ -156,7 +156,7 @@ void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
   assert(Universe::heap()->can_elide_tlab_store_barriers(),
          "compiler must check this first");
   // GC may decide to give back a safer copy of new_obj.
-  new_obj = Universe::heap()->defer_store_barrier(thread, new_obj);
+  new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
   thread->set_vm_result(new_obj);
 }
 
@@ -200,7 +200,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(klassOopDesc* klass, JavaThrea
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
     // inform GC that we won't do card marks for initializing writes.
-    maybe_defer_card_mark(thread);
+    new_store_pre_barrier(thread);
   }
 JRT_END
 
@@ -239,7 +239,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(klassOopDesc* array_type, int len
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
     // inform GC that we won't do card marks for initializing writes.
-    maybe_defer_card_mark(thread);
+    new_store_pre_barrier(thread);
   }
 JRT_END
 
@@ -133,8 +133,9 @@ class OptoRuntime : public AllStatic {
   // Allocate storage for a objArray or typeArray
   static void new_array_C(klassOopDesc* array_klass, int len, JavaThread *thread);
 
-  // Post-slow-path-allocation step for implementing ReduceInitialCardMarks:
-  static void maybe_defer_card_mark(JavaThread* thread);
+  // Post-slow-path-allocation, pre-initializing-stores step for
+  // implementing ReduceInitialCardMarks
+  static void new_store_pre_barrier(JavaThread* thread);
 
   // Allocate storage for a multi-dimensional arrays
   // Note: needs to be fixed for arbitrary number of dimensions
@@ -2012,6 +2012,10 @@ class CommandLineFlags {
   diagnostic(bool, GCParallelVerificationEnabled, true, \
           "Enable parallel memory system verification") \
   \
+  diagnostic(bool, DeferInitialCardMark, false, \
+          "When +ReduceInitialCardMarks, explicitly defer any that " \
+          "may arise from new_pre_store_barrier") \
+  \
   diagnostic(bool, VerifyRememberedSets, false, \
           "Verify GC remembered sets") \
   \
@@ -2357,9 +2357,8 @@ public:
 };
 
 void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
-  // Flush deferred store-barriers, if any, associated with
-  // initializing stores done by this JavaThread in the current epoch.
-  Universe::heap()->flush_deferred_store_barrier(this);
+  // Verify that the deferred card marks have been flushed.
+  assert(deferred_card_mark().is_empty(), "Should be empty during GC");
 
   // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
   // since there may be more than one thread using each ThreadProfiler.
@@ -309,6 +309,7 @@ static inline uint64_t cast_uint64_t(size_t x)
   nonstatic_field(CollectedHeap, _reserved, MemRegion) \
   nonstatic_field(SharedHeap, _perm_gen, PermGen*) \
   nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
+  nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \
   nonstatic_field(CollectedHeap, _is_gc_active, bool) \
   nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \
   nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \