8335356: Shenandoah: Improve concurrent cleanup locking

Reviewed-by: ysr, shade
Author: Xiaolong Peng, 2024-07-11 08:47:15 +00:00; committed by Aleksey Shipilev
parent 62cbf70346
commit b32e4a68bc
2 changed files with 17 additions and 4 deletions
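
In short, as read from the diff below: recycle_trash() previously took and released the heap lock once per trash region, with a SpinPause() between regions. With this change, trash regions are first collected into a pre-allocated _trash_regions array without holding the lock, and are then recycled under the lock in batches bounded by a 30 us deadline, with a yield between batches so that allocators can acquire the lock.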

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

@@ -577,6 +577,7 @@ void ShenandoahRegionPartitions::assert_bounds() {
 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
   _heap(heap),
   _partitions(max_regions, this),
+  _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
   _right_to_left_bias(false),
   _alloc_bias_weight(0)
 {
@@ -899,7 +900,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   return _heap->get_region(beg)->bottom();
 }
 
-void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
+void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
   if (r->is_trash()) {
     _heap->decrease_used(r->used());
     r->recycle();
@@ -910,13 +911,24 @@ void ShenandoahFreeSet::recycle_trash() {
   // lock is not reentrable, check we don't have it
   shenandoah_assert_not_heaplocked();
 
+  size_t count = 0;
   for (size_t i = 0; i < _heap->num_regions(); i++) {
     ShenandoahHeapRegion* r = _heap->get_region(i);
     if (r->is_trash()) {
-      ShenandoahHeapLocker locker(_heap->lock());
-      try_recycle_trashed(r);
+      _trash_regions[count++] = r;
     }
-    SpinPause(); // allow allocators to take the lock
   }
+
+  // Relinquish the lock after this much time passed.
+  static constexpr jlong deadline_ns = 30000; // 30 us
+  size_t idx = 0;
+  while (idx < count) {
+    os::naked_yield(); // Yield to allow allocators to take the lock
+    ShenandoahHeapLocker locker(_heap->lock());
+    const jlong deadline = os::javaTimeNanos() + deadline_ns;
+    while (idx < count && os::javaTimeNanos() < deadline) {
+      try_recycle_trashed(_trash_regions[idx++]);
+    }
+  }
 }
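
The locking pattern above -- do at most ~30 us of work per lock acquisition, then release the lock and yield so contending allocators can get in -- is not Shenandoah-specific. The following is a minimal standalone C++ sketch of the same batching idea; std::mutex and std::chrono stand in for ShenandoahHeapLocker and os::javaTimeNanos(), and the names (heap_lock, recycle_one, process_in_batches) are illustrative, not from the patch:

#include <chrono>
#include <mutex>
#include <thread>
#include <vector>

// Stand-in for the Shenandoah heap lock; all names here are illustrative.
std::mutex heap_lock;

// Placeholder for per-region work such as try_recycle_trashed(r).
static void recycle_one(int& region) { region = 0; }

// Process all items, holding the lock for at most `deadline` per batch;
// yield between batches so contending threads can take the lock.
static void process_in_batches(std::vector<int>& items,
                               std::chrono::nanoseconds deadline =
                                   std::chrono::microseconds(30)) {
  size_t idx = 0;
  while (idx < items.size()) {
    std::this_thread::yield();  // analogous to os::naked_yield()
    std::lock_guard<std::mutex> guard(heap_lock);
    const auto batch_end = std::chrono::steady_clock::now() + deadline;
    while (idx < items.size() &&
           std::chrono::steady_clock::now() < batch_end) {
      recycle_one(items[idx++]);
    }
  }  // guard is released at the end of each outer iteration
}

Compared with the pre-patch per-region lock/unlock plus SpinPause(), this amortizes lock acquisition across many regions, while the deadline still bounds how long any single allocator can be kept waiting.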

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp

@@ -258,6 +258,7 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
 private:
   ShenandoahHeap* const _heap;
   ShenandoahRegionPartitions _partitions;
+  ShenandoahHeapRegion** _trash_regions;
 
   // Mutator allocations are biased from left-to-right or from right-to-left based on which end of mutator range
   // is most likely to hold partially used regions. In general, we want to finish consuming partially used