8336463: Parallel: Add PSOldGen::expand_and_allocate

Reviewed-by: iwalulya, zgu
This commit is contained in:
Albert Mingkun Yang 2024-07-17 09:25:59 +00:00
parent b9b0b8504e
commit 70f3e99016
5 changed files with 29 additions and 10 deletions

View File

@@ -189,7 +189,12 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
assert_lock_strong(PSOldGenExpand_lock);
#ifdef ASSERT
// If called by VM thread, locking is not needed.
if (!Thread::current()->is_VM_thread()) {
assert_lock_strong(PSOldGenExpand_lock);
}
#endif
// Holding the lock means end is stable. So while top may be advancing
// via concurrent allocations, there is no need to order the reads of top
// and end here, unlike in cas_allocate.

View File

@@ -129,7 +129,7 @@ class MutableSpace: public CHeapObj<mtGC> {
// Return true if this space needs to be expanded in order to satisfy an
// allocation request of the indicated size. Concurrent allocations and
// resizes may change the result of a later call. Used by oldgen allocator.
// precondition: holding PSOldGenExpand_lock
// precondition: holding PSOldGenExpand_lock if not VM thread
bool needs_expand(size_t word_size) const;
// Iteration.

View File

@@ -434,8 +434,7 @@ HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tl
result = young_gen()->allocate(size);
if (result == nullptr && !is_tlab) {
// auto expand inside
result = old_gen()->allocate(size);
result = old_gen()->expand_and_allocate(size);
}
return result; // Could be null if we are out of space.
}

View File

@@ -120,6 +120,18 @@ void PSOldGen::initialize_performance_counters(const char* perf_data_name, int l
_object_space, _gen_counters);
}
// Expand the old generation if required, then allocate word_size words.
// Returns the start of the allocated chunk, or null if allocation still
// fails after expansion (heap is out of space).
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
// Preconditions: only the VM thread, inside a safepoint, may call this.
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
assert(Thread::current()->is_VM_thread(), "precondition");
// Grow the committed space first if current free space cannot satisfy
// the request. No PSOldGenExpand_lock is taken here: when the caller is
// the VM thread, locking is not needed (see MutableSpace::needs_expand).
if (object_space()->needs_expand(word_size)) {
expand(word_size*HeapWordSize);
}
// Reuse the CAS API even though this is VM thread in safepoint. This method
// is not invoked repeatedly, so the CAS overhead should be negligible.
return cas_allocate_noexpand(word_size);
}
// Number of IterateBlockSize-byte blocks covering the used portion of the
// object space; uses ceiling division so a partial trailing block counts.
size_t PSOldGen::num_iterable_blocks() const {
return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}
@@ -170,9 +182,13 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
}
bool PSOldGen::expand(size_t bytes) {
assert_lock_strong(PSOldGenExpand_lock);
#ifdef ASSERT
if (!Thread::current()->is_VM_thread()) {
assert_lock_strong(PSOldGenExpand_lock);
}
assert_locked_or_safepoint(Heap_lock);
assert(bytes > 0, "precondition");
#endif
const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_up(bytes, alignment);
size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
@@ -208,8 +224,6 @@ bool PSOldGen::expand(size_t bytes) {
}
bool PSOldGen::expand_by(size_t bytes) {
assert_lock_strong(PSOldGenExpand_lock);
assert_locked_or_safepoint(Heap_lock);
assert(bytes > 0, "precondition");
bool result = virtual_space()->expand_by(bytes);
if (result) {
@@ -244,9 +258,6 @@ bool PSOldGen::expand_by(size_t bytes) {
}
bool PSOldGen::expand_to_reserved() {
assert_lock_strong(PSOldGenExpand_lock);
assert_locked_or_safepoint(Heap_lock);
bool result = false;
const size_t remaining_bytes = virtual_space()->uncommitted_size();
if (remaining_bytes > 0) {

View File

@@ -118,6 +118,7 @@ class PSOldGen : public CHeapObj<mtGC> {
// Calculating new sizes
void resize(size_t desired_free_space);
// Invoked by mutators and GC-workers.
HeapWord* allocate(size_t word_size) {
HeapWord* res;
do {
@@ -127,6 +128,9 @@ class PSOldGen : public CHeapObj<mtGC> {
return res;
}
// Invoked by VM thread inside a safepoint.
HeapWord* expand_and_allocate(size_t word_size);
// Iteration.
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); }
void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); }