7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Keep a counter of how many times we were stalled by the GC locker, and add a diagnostic flag which sets the limit. Reviewed-by: brutisso, ehelin, johnc
commit 89120e7827
parent cf6d13410c
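Every affected allocation path follows the same pattern: a per-request counter is bumped each time the thread stalls on the GC locker, and the allocation gives up (returns NULL) once the counter exceeds the new GCLockerRetryAllocationCount diagnostic flag, instead of stalling indefinitely. A minimal standalone sketch of that shape follows; it is illustrative only, not HotSpot code, and try_allocate() and wait_until_gc_locker_clears() are hypothetical stand-ins for the real fast path and GC_locker::stall_until_clear().

    // Illustrative sketch of the bounded stall-and-retry pattern.
    #include <cstddef>
    #include <cstdlib>

    static const int kRetryLimit = 2;  // stands in for GCLockerRetryAllocationCount

    static void* try_allocate(size_t size) { return std::malloc(size); }
    static void  wait_until_gc_locker_clears() { /* would block until cleared */ }

    void* allocate_with_bounded_stall(size_t size) {
      int stalled_count = 0;
      for (;;) {
        void* result = try_allocate(size);
        if (result != NULL) {
          return result;                // allocation satisfied
        }
        if (stalled_count > kRetryLimit) {
          return NULL;                  // bounded: fail rather than stall forever
        }
        wait_until_gc_locker_clears();  // previously this could repeat without bound
        stalled_count += 1;
      }
    }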
@@ -854,7 +854,8 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert(!isHumongous(word_size), "we do not allow humongous TLABs");

   unsigned int dummy_gc_count_before;
-  return attempt_allocation(word_size, &dummy_gc_count_before);
+  int dummy_gclocker_retry_count = 0;
+  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 }

 HeapWord*
@@ -863,14 +864,14 @@ G1CollectedHeap::mem_allocate(size_t word_size,
   assert_heap_not_locked_and_not_at_safepoint();

   // Loop until the allocation is satisified, or unsatisfied after GC.
-  for (int try_count = 1; /* we'll return */; try_count += 1) {
+  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;

     HeapWord* result = NULL;
     if (!isHumongous(word_size)) {
-      result = attempt_allocation(word_size, &gc_count_before);
+      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
     } else {
-      result = attempt_allocation_humongous(word_size, &gc_count_before);
+      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
     }
     if (result != NULL) {
       return result;
@@ -894,6 +895,9 @@ G1CollectedHeap::mem_allocate(size_t word_size,
       }
       return result;
     } else {
+      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
       assert(op.result() == NULL,
              "the result should be NULL if the VM op did not succeed");
     }
@@ -910,7 +914,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
 }

 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                                   unsigned int *gc_count_before_ret) {
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().

   assert_heap_not_locked_and_not_at_safepoint();
@@ -986,10 +991,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }

     // We can reach here if we were unsuccessul in scheduling a
@@ -1019,7 +1030,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 }

 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                        unsigned int * gc_count_before_ret) {
+                                                        unsigned int * gc_count_before_ret,
+                                                        int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1104,10 +1116,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }

     // We can reach here if we were unsuccessul in scheduling a
@@ -559,18 +559,21 @@ protected:
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret);
+                                      unsigned int* gc_count_before_ret,
+                                      int* gclocker_retry_count_ret);

   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
-                                    unsigned int* gc_count_before_ret);
+                                    unsigned int* gc_count_before_ret,
+                                    int* gclocker_retry_count_ret);

   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret);
+                                         unsigned int* gc_count_before_ret,
+                                         int* gclocker_retry_count_ret);

   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
@@ -60,7 +60,8 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {

 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret) {
+                                    unsigned int* gc_count_before_ret,
+                                    int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -68,7 +69,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size,
   HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
   if (result == NULL) {
-    result = attempt_allocation_slow(word_size, gc_count_before_ret);
+    result = attempt_allocation_slow(word_size,
+                                     gc_count_before_ret,
+                                     gclocker_retry_count_ret);
   }
   assert_heap_not_locked();
   if (result != NULL) {
@@ -326,6 +326,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(

   uint loop_count = 0;
   uint gc_count = 0;
+  int gclocker_stalled_count = 0;

   while (result == NULL) {
     // We don't want to have multiple collections for a single filled generation.
@@ -354,6 +355,10 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
         return result;
       }

+      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
+
       // Failed to allocate without a gc.
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
@@ -366,6 +371,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       if (!jthr->in_critical()) {
         MutexUnlocker mul(Heap_lock);
         GC_locker::stall_until_clear();
+        gclocker_stalled_count += 1;
         continue;
       } else {
         if (CheckJNICalls) {
@@ -532,7 +532,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,

   // Loop until the allocation is satisified,
   // or unsatisfied after GC.
-  for (int try_count = 1; /* return or throw */; try_count += 1) {
+  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
     HandleMark hm; // discard any handles allocated in each iteration

     // First allocation attempt is lock-free.
@@ -576,6 +576,10 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
      }
    }

+    if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+      return NULL; // we didn't get to do a GC and we didn't get any memory
+    }
+
    // If this thread is not in a jni critical section, we stall
    // the requestor until the critical section has cleared and
    // GC allowed. When the critical section clears, a GC is
@@ -587,6 +591,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
         MutexUnlocker mul(Heap_lock);
         // Wait for JNI critical section to be exited
         GC_locker::stall_until_clear();
+        gclocker_stalled_count += 1;
         continue;
       } else {
         if (CheckJNICalls) {
@@ -1402,6 +1402,10 @@ class CommandLineFlags {
           "How much the GC can expand the eden by while the GC locker "    \
           "is active (as a percentage)")                                   \
                                                                            \
+  diagnostic(intx, GCLockerRetryAllocationCount, 2,                        \
+          "Number of times to retry allocations when"                      \
+          " blocked by the GC locker")                                     \
+                                                                           \
   develop(bool, UseCMSAdaptiveFreeLists, true,                             \
           "Use Adaptive Free Lists in the CMS generation")                 \
                                                                            \
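Because the new flag is declared diagnostic, overriding its default of 2 in a product build requires unlocking diagnostic options first. An illustrative invocation (MyApp is a hypothetical main class):

    java -XX:+UnlockDiagnosticVMOptions -XX:GCLockerRetryAllocationCount=8 MyApp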