diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index 7e1115e2b41..89be0f31f67 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -189,7 +189,12 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) { // Only used by oldgen allocation. bool MutableSpace::needs_expand(size_t word_size) const { - assert_lock_strong(PSOldGenExpand_lock); +#ifdef ASSERT + // If called by VM thread, locking is not needed. + if (!Thread::current()->is_VM_thread()) { + assert_lock_strong(PSOldGenExpand_lock); + } +#endif // Holding the lock means end is stable. So while top may be advancing // via concurrent allocations, there is no need to order the reads of top // and end here, unlike in cas_allocate. diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp index 1add9c7735a..b48b1ebcc9f 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.hpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp @@ -129,7 +129,7 @@ class MutableSpace: public CHeapObj { // Return true if this space needs to be expanded in order to satisfy an // allocation request of the indicated size. Concurrent allocations and // resizes may change the result of a later call. Used by oldgen allocator. - // precondition: holding PSOldGenExpand_lock + // precondition: holding PSOldGenExpand_lock if not VM thread bool needs_expand(size_t word_size) const; // Iteration. 
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index cee5a805cb7..5883b1cd607 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -434,8 +434,7 @@ HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tl result = young_gen()->allocate(size); if (result == nullptr && !is_tlab) { - // auto expand inside - result = old_gen()->allocate(size); + result = old_gen()->expand_and_allocate(size); } return result; // Could be null if we are out of space. } diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index 4744811b425..2715ab90768 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -120,6 +120,18 @@ void PSOldGen::initialize_performance_counters(const char* perf_data_name, int l _object_space, _gen_counters); } +HeapWord* PSOldGen::expand_and_allocate(size_t word_size) { + assert(SafepointSynchronize::is_at_safepoint(), "precondition"); + assert(Thread::current()->is_VM_thread(), "precondition"); + if (object_space()->needs_expand(word_size)) { + expand(word_size*HeapWordSize); + } + + // Reuse the CAS API even though this is the VM thread at a safepoint. This method + // is not invoked repeatedly, so the CAS overhead should be negligible. 
+ return cas_allocate_noexpand(word_size); +} + size_t PSOldGen::num_iterable_blocks() const { return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize; } @@ -170,9 +182,13 @@ bool PSOldGen::expand_for_allocate(size_t word_size) { } bool PSOldGen::expand(size_t bytes) { - assert_lock_strong(PSOldGenExpand_lock); +#ifdef ASSERT + if (!Thread::current()->is_VM_thread()) { + assert_lock_strong(PSOldGenExpand_lock); + } assert_locked_or_safepoint(Heap_lock); assert(bytes > 0, "precondition"); +#endif const size_t alignment = virtual_space()->alignment(); size_t aligned_bytes = align_up(bytes, alignment); size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment); @@ -208,8 +224,6 @@ bool PSOldGen::expand(size_t bytes) { } bool PSOldGen::expand_by(size_t bytes) { - assert_lock_strong(PSOldGenExpand_lock); - assert_locked_or_safepoint(Heap_lock); assert(bytes > 0, "precondition"); bool result = virtual_space()->expand_by(bytes); if (result) { @@ -244,9 +258,6 @@ bool PSOldGen::expand_by(size_t bytes) { } bool PSOldGen::expand_to_reserved() { - assert_lock_strong(PSOldGenExpand_lock); - assert_locked_or_safepoint(Heap_lock); - bool result = false; const size_t remaining_bytes = virtual_space()->uncommitted_size(); if (remaining_bytes > 0) { diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index 68cac7128da..09481d8ddde 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -118,6 +118,7 @@ class PSOldGen : public CHeapObj { // Calculating new sizes void resize(size_t desired_free_space); + // Invoked by mutators and GC-workers. HeapWord* allocate(size_t word_size) { HeapWord* res; do { @@ -127,6 +128,9 @@ class PSOldGen : public CHeapObj { return res; } + // Invoked by VM thread inside a safepoint. + HeapWord* expand_and_allocate(size_t word_size); + // Iteration. 
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); } void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); }