8276055: ZGC: Defragment address space
Reviewed-by: eosterlund, stefank
parent d9b0138d7d
commit 1750a6e2c0
--- a/src/hotspot/share/gc/z/zMemory.cpp
+++ b/src/hotspot/share/gc/z/zMemory.cpp
@@ -85,7 +85,19 @@ void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
   _callbacks = callbacks;
 }
 
-uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+uintptr_t ZMemoryManager::peek_low_address() const {
+  ZLocker<ZLock> locker(&_lock);
+
+  const ZMemory* const area = _freelist.first();
+  if (area != NULL) {
+    return area->start();
+  }
+
+  // Out of memory
+  return UINTPTR_MAX;
+}
+
+uintptr_t ZMemoryManager::alloc_low_address(size_t size) {
   ZLocker<ZLock> locker(&_lock);
 
   ZListIterator<ZMemory> iter(&_freelist);
@@ -110,7 +122,7 @@ uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
+uintptr_t ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) {
   ZLocker<ZLock> locker(&_lock);
 
   ZMemory* area = _freelist.first();
@@ -136,7 +148,7 @@ uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocate
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+uintptr_t ZMemoryManager::alloc_high_address(size_t size) {
   ZLocker<ZLock> locker(&_lock);
 
   ZListReverseIterator<ZMemory> iter(&_freelist);
@@ -160,7 +172,7 @@ uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
+uintptr_t ZMemoryManager::alloc_high_address_at_most(size_t size, size_t* allocated) {
   ZLocker<ZLock> locker(&_lock);
 
   ZMemory* area = _freelist.last();
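
The renaming makes the free list's address-ordered nature explicit: the "front" is the lowest free address and the "back" the highest. The sketch below is a toy model of the same three primitives — peek at the lowest free address, carve from the low end, carve from the high end — using a std::map in place of HotSpot's sorted ZList<ZMemory>; names and representation are illustrative, not the JDK code.

#include <cstddef>
#include <cstdint>
#include <map>

// Toy stand-in for ZMemoryManager's out-of-memory sentinel.
static const uintptr_t kOutOfMemory = UINTPTR_MAX;

// Free ranges keyed by start address, so the map's two ends are the
// lowest- and highest-addressed free areas.
class ToyMemoryManager {
private:
  std::map<uintptr_t, size_t> _free; // start address -> range size

public:
  void free(uintptr_t start, size_t size) {
    _free[start] = size; // the real code also merges adjacent areas
  }

  // What alloc_low_address would return next, without allocating.
  uintptr_t peek_low_address() const {
    return _free.empty() ? kOutOfMemory : _free.begin()->first;
  }

  // Carve 'size' bytes from the start of the lowest range that fits.
  uintptr_t alloc_low_address(size_t size) {
    for (auto it = _free.begin(); it != _free.end(); ++it) {
      if (it->second >= size) {
        const uintptr_t start = it->first;
        const size_t remaining = it->second - size;
        _free.erase(it);
        if (remaining > 0) {
          _free[start + size] = remaining; // keep the tail of the range
        }
        return start;
      }
    }
    return kOutOfMemory;
  }

  // Carve 'size' bytes from the end of the highest range that fits.
  uintptr_t alloc_high_address(size_t size) {
    for (auto it = _free.rbegin(); it != _free.rend(); ++it) {
      if (it->second >= size) {
        it->second -= size;
        const uintptr_t start = it->first + it->second;
        if (it->second == 0) {
          _free.erase(it->first);
        }
        return start;
      }
    }
    return kOutOfMemory;
  }
};

Keeping the list address-ordered is what makes peek_low_address() a constant-time lookup and the two-ended allocation policy cheap.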
--- a/src/hotspot/share/gc/z/zMemory.hpp
+++ b/src/hotspot/share/gc/z/zMemory.hpp
@@ -66,7 +66,7 @@ public:
   };
 
 private:
-  ZLock          _lock;
+  mutable ZLock  _lock;
   ZList<ZMemory> _freelist;
   Callbacks      _callbacks;
 
@@ -82,10 +82,11 @@ public:
 
   void register_callbacks(const Callbacks& callbacks);
 
-  uintptr_t alloc_from_front(size_t size);
-  uintptr_t alloc_from_front_at_most(size_t size, size_t* allocated);
-  uintptr_t alloc_from_back(size_t size);
-  uintptr_t alloc_from_back_at_most(size_t size, size_t* allocated);
+  uintptr_t peek_low_address() const;
+  uintptr_t alloc_low_address(size_t size);
+  uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated);
+  uintptr_t alloc_high_address(size_t size);
+  uintptr_t alloc_high_address_at_most(size_t size, size_t* allocated);
 
   void free(uintptr_t start, size_t size);
 };
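
Note the `mutable` qualifier added to `_lock` above: the new `peek_low_address()` is a const member function, yet it must acquire the lock. Marking the lock mutable is the standard C++ idiom for that, as this minimal standalone example (std::mutex standing in for ZLock, purely illustrative) shows:

#include <mutex>

class Example {
private:
  mutable std::mutex _lock; // mutable: lockable even from const members
  int _value = 0;

public:
  int value() const {
    std::lock_guard<std::mutex> guard(_lock); // OK because _lock is mutable
    return _value;
  }
};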
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -48,6 +48,7 @@
 
 static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
 static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
+static const ZStatCounter       ZCounterDefragment("Memory", "Defragment", ZStatUnitOpsPerSecond);
 static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
 
 enum ZPageAllocationStall {
@@ -559,12 +560,43 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
   return new ZPage(allocation->type(), vmem, pmem);
 }
 
-static bool is_alloc_satisfied(ZPageAllocation* allocation) {
+bool ZPageAllocator::should_defragment(const ZPage* page) const {
+  // A small page can end up at a high address (second half of the address space)
+  // if we've split a larger page or we have a constrained address space. To help
+  // fight address space fragmentation we remap such pages to a lower address, if
+  // a lower address is available.
+  return page->type() == ZPageTypeSmall &&
+         page->start() >= _virtual.reserved() / 2 &&
+         page->start() > _virtual.lowest_available_address();
+}
+
+bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
   // The allocation is immediately satisfied if the list of pages contains
-  // exactly one page, with the type and size that was requested.
-  return allocation->pages()->size() == 1 &&
-         allocation->pages()->first()->type() == allocation->type() &&
-         allocation->pages()->first()->size() == allocation->size();
+  // exactly one page, with the type and size that was requested. However,
+  // even if the allocation is immediately satisfied we might still want to
+  // return false here to force the page to be remapped to fight address
+  // space fragmentation.
+
+  if (allocation->pages()->size() != 1) {
+    // Not a single page
+    return false;
+  }
+
+  const ZPage* const page = allocation->pages()->first();
+  if (page->type() != allocation->type() ||
+      page->size() != allocation->size()) {
+    // Wrong type or size
+    return false;
+  }
+
+  if (should_defragment(page)) {
+    // Defragment address space
+    ZStatInc(ZCounterDefragment);
+    return false;
+  }
+
+  // Allocation immediately satisfied
+  return true;
 }
 
 ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
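
The heart of the change is `should_defragment()` above: a small page sitting in the upper half of the reserved address space is forced through the remap path whenever a strictly lower address is free. A hedged standalone rendering of the same arithmetic (illustrative helper and constants, and it omits the small-page type check):

#include <cassert>
#include <cstdint>

// Same condition as should_defragment(), minus the page-type check: the
// page lives in the upper half of the reserved range, and some strictly
// lower address could hold it instead.
bool should_defragment_toy(uint64_t page_start,
                           uint64_t reserved,
                           uint64_t lowest_free) {
  return page_start >= reserved / 2 &&
         page_start > lowest_free;
}

int main() {
  const uint64_t G = 1024ull * 1024 * 1024;
  assert(should_defragment_toy(12 * G, 16 * G, 2 * G));   // high page, lower slot free
  assert(!should_defragment_toy(4 * G, 16 * G, 2 * G));   // already in the lower half
  assert(!should_defragment_toy(12 * G, 16 * G, 12 * G)); // no strictly lower address
}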
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -89,6 +89,8 @@ private:
   bool alloc_page_common(ZPageAllocation* allocation);
   bool alloc_page_stall(ZPageAllocation* allocation);
   bool alloc_page_or_stall(ZPageAllocation* allocation);
+  bool should_defragment(const ZPage* page) const;
+  bool is_alloc_satisfied(ZPageAllocation* allocation) const;
   ZPage* alloc_page_create(ZPageAllocation* allocation);
   ZPage* alloc_page_finalize(ZPageAllocation* allocation);
   void alloc_page_failed(ZPageAllocation* allocation);
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp
@@ -295,7 +295,7 @@ void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
   // Allocate segments
   while (size > 0) {
     size_t allocated = 0;
-    const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
+    const uintptr_t start = _manager.alloc_low_address_at_most(size, &allocated);
     assert(start != UINTPTR_MAX, "Allocation should never fail");
     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
     size -= allocated;
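
The `_at_most` variant exists so one physical allocation can be stitched together from several discontiguous ranges. Below is a sketch of that gathering loop under stated assumptions — the stubbed `alloc_low_address_at_most` models a single free range and is not the HotSpot implementation:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Segment {
  uintptr_t start;
  size_t    size;
};

// Stub of the renamed primitive: hand out at most 'size' bytes from the
// lowest free range, report the granted amount through *allocated, and
// return UINTPTR_MAX when nothing is left. One range stands in for the
// real free list; names here are illustrative.
static uintptr_t free_start = 0;
static size_t    free_size  = 8 * 1024 * 1024;

static uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated) {
  if (free_size == 0) {
    return UINTPTR_MAX;
  }
  *allocated = size < free_size ? size : free_size;
  const uintptr_t start = free_start;
  free_start += *allocated;
  free_size  -= *allocated;
  return start;
}

// Mirror of the loop in ZPhysicalMemoryManager::alloc: keep taking the
// lowest available range until the request is covered, one segment per
// range taken.
std::vector<Segment> alloc_segments(size_t size) {
  std::vector<Segment> segments;
  while (size > 0) {
    size_t allocated = 0;
    const uintptr_t start = alloc_low_address_at_most(size, &allocated);
    // ZGC asserts this never fails: capacity was claimed before this point.
    segments.push_back(Segment{start, allocated});
    size -= allocated;
  }
  return segments;
}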
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp
@@ -33,6 +33,7 @@
 
 ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
     _manager(),
+    _reserved(0),
     _initialized(false) {
 
   // Check max supported heap size
@@ -173,6 +174,9 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
   log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
                        reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);
 
+  // Record reserved
+  _reserved = reserved;
+
   return reserved >= max_capacity;
 }
 
@@ -191,9 +195,9 @@ ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address)
   // Small pages are allocated at low addresses, while medium/large pages
   // are allocated at high addresses (unless forced to be at a low address).
   if (force_low_address || size <= ZPageSizeSmall) {
-    start = _manager.alloc_from_front(size);
+    start = _manager.alloc_low_address(size);
   } else {
-    start = _manager.alloc_from_back(size);
+    start = _manager.alloc_high_address(size);
  }
 
   return ZVirtualMemory(start, size);
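
The placement policy in `ZVirtualMemoryManager::alloc` is what makes the low end of the address space meaningful for defragmentation: small pages cluster low, medium/large pages go high. Reduced to a predicate (the 2M value matches ZGC's default small page size, but the constant here is illustrative, not taken from the sources):

#include <cassert>
#include <cstddef>

const size_t kSmallPageSize = 2 * 1024 * 1024;

// The branch in ZVirtualMemoryManager::alloc, as a standalone predicate.
bool goes_to_low_addresses(size_t size, bool force_low_address) {
  return force_low_address || size <= kSmallPageSize;
}

int main() {
  assert(goes_to_low_addresses(kSmallPageSize, false));    // small page -> low
  assert(!goes_to_low_addresses(32 * 1024 * 1024, false)); // medium page -> high
  assert(goes_to_low_addresses(32 * 1024 * 1024, true));   // forced -> low
}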
--- a/src/hotspot/share/gc/z/zVirtualMemory.hpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp
@@ -48,6 +48,7 @@ public:
 class ZVirtualMemoryManager {
 private:
   ZMemoryManager _manager;
+  uintptr_t      _reserved;
   bool           _initialized;
 
   // Platform specific implementation
@@ -69,6 +70,9 @@ public:
 
   bool is_initialized() const;
 
+  size_t reserved() const;
+  uintptr_t lowest_available_address() const;
+
   ZVirtualMemory alloc(size_t size, bool force_low_address);
   void free(const ZVirtualMemory& vmem);
 };
--- a/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp
@@ -57,4 +57,12 @@ inline ZVirtualMemory ZVirtualMemory::split(size_t size) {
   return ZVirtualMemory(_start - size, size);
 }
 
+inline size_t ZVirtualMemoryManager::reserved() const {
+  return _reserved;
+}
+
+inline uintptr_t ZVirtualMemoryManager::lowest_available_address() const {
+  return _manager.peek_low_address();
+}
+
 #endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP