8260044: Parallel GC: Concurrent allocation after heap expansion may cause unnecessary full gc
8260045: Parallel GC: Waiting on ExpandHeap_lock may cause "expansion storm"

Loop to retry allocation if expand succeeds. Treat space available after
obtaining the expand lock as expand success.

Reviewed-by: tschatzl, iwalulya, sjohanss

commit 6a84ec68c3 (parent 92ff891877)
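The fix combines two ideas: the allocation path retries in a loop for as long as expansion reports success, and a thread that obtains ExpandHeap_lock rechecks whether expansion is still needed, treating space already made available by another thread as success. Below is a minimal, self-contained C++ sketch of that scheme (illustrative only, not HotSpot code; the Arena class and every name in it are invented for this example):

#include <atomic>
#include <cstddef>
#include <mutex>

class Arena {
  std::atomic<size_t> _top;   // next free offset, advanced by CAS
  std::atomic<size_t> _end;   // committed limit; only grows, under _lock
  const size_t _reserved;     // hard bound on expansion
  std::mutex _lock;           // plays the role of ExpandHeap_lock

  // Lock-free fast path, analogous to cas_allocate_noexpand().
  bool try_alloc(size_t words, size_t& offset) {
    size_t top = _top.load();
    do {
      if (_end.load() - top < words) return false;  // space exhausted
    } while (!_top.compare_exchange_weak(top, top + words));
    offset = top;
    return true;
  }

  // Analogous to MutableSpace::needs_expand(): caller holds _lock, so _end
  // is stable while we compare it against the still-advancing _top.
  bool needs_expand(size_t words) {
    return _end.load() - _top.load() < words;
  }

  // Analogous to PSOldGen::expand_for_allocate(): recheck under the lock so
  // threads queued behind one expansion do not each expand again in turn.
  bool expand_for_allocate(size_t words) {
    std::lock_guard<std::mutex> x(_lock);
    if (!needs_expand(words)) {
      return true;  // another thread already made room; treat as success
    }
    size_t new_end = _end.load() + words;
    if (new_end > _reserved) return false;  // cannot grow any further
    _end.store(new_end);  // real code would commit memory here
    return true;
  }

 public:
  Arena(size_t committed, size_t reserved)
      : _top(0), _end(committed), _reserved(reserved) {}

  // Analogous to PSOldGen::allocate(): retry as long as expansion succeeds,
  // because other threads may consume the space we just made available.
  bool allocate(size_t words, size_t& offset) {
    while (!try_alloc(words, offset)) {
      if (!expand_for_allocate(words)) return false;  // caller falls back to GC
    }
    return true;
  }
};

In the actual patch these roles are played by cas_allocate_noexpand(), MutableSpace::needs_expand(), PSOldGen::expand_for_allocate(), and the retry loop in PSOldGen::allocate(), as shown in the hunks below.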
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -215,6 +215,15 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
 }
 
+// Only used by oldgen allocation.
+bool MutableSpace::needs_expand(size_t word_size) const {
+  assert_lock_strong(ExpandHeap_lock);
+  // Holding the lock means end is stable. So while top may be advancing
+  // via concurrent allocations, there is no need to order the reads of top
+  // and end here, unlike in cas_allocate.
+  return pointer_delta(end(), top()) < word_size;
+}
+
 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();
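(The contrast drawn in the new comment: cas_allocate reads top and end without the lock, so a concurrent expansion can move end between the two reads and the accesses must be ordered; needs_expand holds ExpandHeap_lock, so end is fixed and a racy read of the still-advancing top is harmless.)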
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -142,6 +142,11 @@ class MutableSpace: public CHeapObj<mtGC> {
   virtual HeapWord* cas_allocate(size_t word_size);
   // Optional deallocation. Used in NUMA-allocator.
   bool cas_deallocate(HeapWord *obj, size_t size);
+  // Return true if this space needs to be expanded in order to satisfy an
+  // allocation request of the indicated size. Concurrent allocations and
+  // resizes may change the result of a later call. Used by oldgen allocator.
+  // precondition: holding ExpandHeap_lock
+  bool needs_expand(size_t word_size) const;
 
   // Iteration.
   void oop_iterate(OopIterateClosure* cl);
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
   }
 }
 
-HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
-  expand(word_size*HeapWordSize);
+bool PSOldGen::expand_for_allocate(size_t word_size) {
+  assert(word_size > 0, "allocating zero words?");
+  bool result = true;
+  {
+    MutexLocker x(ExpandHeap_lock);
+    // Avoid "expand storms" by rechecking available space after obtaining
+    // the lock, because another thread may have already made sufficient
+    // space available. If insufficient space available, that will remain
+    // true until we expand, since we have the lock. Other threads may take
+    // the space we need before we can allocate it, regardless of whether we
+    // expand. That's okay, we'll just try expanding again.
+    if (object_space()->needs_expand(word_size)) {
+      result = expand(word_size*HeapWordSize);
+    }
+  }
   if (GCExpandToAllocateDelayMillis > 0) {
     os::naked_sleep(GCExpandToAllocateDelayMillis);
   }
-  return cas_allocate_noexpand(word_size);
+  return result;
 }
 
-void PSOldGen::expand(size_t bytes) {
-  if (bytes == 0) {
-    return;
-  }
-  MutexLocker x(ExpandHeap_lock);
+bool PSOldGen::expand(size_t bytes) {
+  assert_lock_strong(ExpandHeap_lock);
+  assert_locked_or_safepoint(Heap_lock);
+  assert(bytes > 0, "precondition");
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes = align_up(bytes, alignment);
   size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
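This is double-checked locking applied to expansion: the failed lock-free allocation is the cheap first check, and needs_expand() is the recheck under the lock. Returning true without expanding when another thread has already made room is what the commit message means by treating available space as expand success; the caller's retry loop handles both cases identically.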
@@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) {
     // providing a page per lgroup. Alignment is larger or equal to the page size.
     aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
   }
-  if (aligned_bytes == 0){
-    // The alignment caused the number of bytes to wrap. An expand_by(0) will
-    // return true with the implication that and expansion was done when it
-    // was not. A call to expand implies a best effort to expand by "bytes"
-    // but not a guarantee. Align down to give a best effort. This is likely
-    // the most that the generation can expand since it has some capacity to
-    // start with.
+  if (aligned_bytes == 0) {
+    // The alignment caused the number of bytes to wrap. A call to expand
+    // implies a best effort to expand by "bytes" but not a guarantee. Align
+    // down to give a best effort. This is likely the most that the generation
+    // can expand since it has some capacity to start with.
     aligned_bytes = align_down(bytes, alignment);
   }
 
@@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) {
   if (success && GCLocker::is_active_and_needs_gc()) {
     log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
+  return success;
 }
 
 bool PSOldGen::expand_by(size_t bytes) {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
-  if (bytes == 0) {
-    return true; // That's what virtual_space()->expand_by(0) would return
-  }
+  assert(bytes > 0, "precondition");
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
     if (ZapUnusedHeapArea) {
@@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
 
-  bool result = true;
+  bool result = false;
   const size_t remaining_bytes = virtual_space()->uncommitted_size();
   if (remaining_bytes > 0) {
     result = expand_by(remaining_bytes);
@@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) {
   }
   if (new_size > current_size) {
     size_t change_bytes = new_size - current_size;
+    MutexLocker x(ExpandHeap_lock);
     expand(change_bytes);
   } else {
     size_t change_bytes = current_size - new_size;
-    // shrink doesn't grab this lock, expand does. Is that right?
     MutexLocker x(ExpandHeap_lock);
     shrink(change_bytes);
   }
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp
@@ -79,8 +79,8 @@ class PSOldGen : public CHeapObj<mtGC> {
     return res;
   }
 
-  HeapWord* expand_and_cas_allocate(size_t word_size);
-  void expand(size_t bytes);
+  bool expand_for_allocate(size_t word_size);
+  bool expand(size_t bytes);
   bool expand_by(size_t bytes);
   bool expand_to_reserved();
 
@@ -135,8 +135,12 @@ class PSOldGen : public CHeapObj<mtGC> {
   void resize(size_t desired_free_space);
 
   HeapWord* allocate(size_t word_size) {
-    HeapWord* res = cas_allocate_noexpand(word_size);
-    return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
+    HeapWord* res;
+    do {
+      res = cas_allocate_noexpand(word_size);
+      // Retry failed allocation if expand succeeds.
+    } while ((res == nullptr) && expand_for_allocate(word_size));
+    return res;
   }
 
   // Iteration.
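With expand() now returning a bool, the retry loop above cannot spin without global progress: every extra iteration follows either a successful expansion or consumption of space by some other allocating thread, and once the virtual space is fully committed and exhausted, expand() fails, expand_for_allocate() returns false, and allocate() returns nullptr so the caller can fall back to a full GC.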