diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index 02d20b92b5c..d1291e35bfc 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -518,13 +518,6 @@ void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool
   CollectedHeap::fill_with_object(start, end, zap);
 }
 
-HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
-                                           size_t requested_size,
-                                           size_t* actual_size) {
-  guarantee(false, "thread-local allocation buffers not supported");
-  return nullptr;
-}
-
 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
          "Should only be called at a safepoint or at start-up");
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 1562a4b43ad..511156170ee 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -149,7 +149,7 @@ class CollectedHeap : public CHeapObj<mtGC> {
   // returned in actual_size.
   virtual HeapWord* allocate_new_tlab(size_t min_size,
                                       size_t requested_size,
-                                      size_t* actual_size);
+                                      size_t* actual_size) = 0;
 
   // Reinitialize tlabs before resuming mutators.
   virtual void resize_all_tlabs();
@@ -345,10 +345,7 @@ protected:
   // An estimate of the maximum allocation that could be performed
   // for thread-local allocation buffers without triggering any
   // collection or expansion activity.
-  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
-    guarantee(false, "thread-local allocation buffers not supported");
-    return 0;
-  }
+  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const = 0;
 
   // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
   // incremental and cooperative. In order for that to work well, mechanisms that stop
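
Both TLAB hooks on CollectedHeap move from failing default bodies (guarantee(false, ...)) to pure virtuals, so a concrete heap that lacks TLAB support now fails at compile time rather than at runtime when the hook is first exercised. Below is a minimal standalone C++ sketch of that trade-off; it is illustrative only, not HotSpot code: HeapBefore, HeapAfter, and ForgetfulHeap are hypothetical names, and HotSpot's guarantee() is approximated with assert().

// Standalone sketch (hypothetical types) of the before/after pattern.
#include <cassert>
#include <cstddef>

using HeapWord = void*;

// Before: the base class supplies a failing default, so overriding is
// optional and a missing implementation is only detected at runtime.
struct HeapBefore {
  virtual ~HeapBefore() = default;
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size) {
    assert(false && "thread-local allocation buffers not supported");
    return nullptr;  // reached only if asserts are disabled
  }
};

// After: pure virtual, so every concrete heap must state its TLAB
// behavior explicitly or it cannot be instantiated at all.
struct HeapAfter {
  virtual ~HeapAfter() = default;
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size) = 0;
};

// Compiles under the "before" scheme, but crashes when the hook is called.
struct ForgetfulHeap : HeapBefore {};

// Under the "after" scheme the analogous class would be abstract:
//   struct ForgetfulHeap2 : HeapAfter {};
//   HeapAfter* h = new ForgetfulHeap2();  // compile error if uncommented

int main() {
  ForgetfulHeap heap;
  size_t actual = 0;
  // Uncommenting the call below trips the runtime check, demonstrating
  // how the old defaults deferred the failure until first use:
  // heap.allocate_new_tlab(1, 8, &actual);
  (void)heap;
  (void)actual;
  return 0;
}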