8310015: ZGC: Unbounded asynchronous unmapping can lead to running out of address space
Reviewed-by: stefank, aboldtch
parent 266f9838ee
commit 4229baf9b6
@@ -23,6 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/gc_globals.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
 #include "gc/x/xList.inline.hpp"
 #include "gc/x/xLock.inline.hpp"
 #include "gc/x/xPage.inline.hpp"
@@ -35,6 +36,8 @@ XUnmapper::XUnmapper(XPageAllocator* page_allocator) :
     _page_allocator(page_allocator),
     _lock(),
     _queue(),
+    _enqueued_bytes(0),
+    _warned_sync_unmapping(false),
     _stop(false) {
   set_name("XUnmapper");
   create_and_start();
@@ -50,6 +53,7 @@ XPage* XUnmapper::dequeue() {
 
     XPage* const page = _queue.remove_first();
     if (page != nullptr) {
+      _enqueued_bytes -= page->size();
       return page;
     }
 
@@ -57,6 +61,42 @@ XPage* XUnmapper::dequeue() {
   }
 }
 
+bool XUnmapper::try_enqueue(XPage* page) {
+  if (ZVerifyViews) {
+    // Asynchronous unmap and destroy is not supported with ZVerifyViews
+    return false;
+  }
+
+  // Enqueue for asynchronous unmap and destroy
+  XLocker<XConditionLock> locker(&_lock);
+  if (is_saturated()) {
+    // The unmapper thread is lagging behind and is unable to unmap memory fast enough
+    if (!_warned_sync_unmapping) {
+      _warned_sync_unmapping = true;
+      log_warning_p(gc)("WARNING: Encountered synchronous unmapping because asynchronous unmapping could not keep up");
+    }
+    log_debug(gc, unmap)("Synchronous unmapping " SIZE_FORMAT "M page", page->size() / M);
+    return false;
+  }
+
+  log_trace(gc, unmap)("Asynchronous unmapping " SIZE_FORMAT "M page (" SIZE_FORMAT "M / " SIZE_FORMAT "M enqueued)",
+                       page->size() / M, _enqueued_bytes / M, queue_capacity() / M);
+
+  _queue.insert_last(page);
+  _enqueued_bytes += page->size();
+  _lock.notify_all();
+
+  return true;
+}
+
+size_t XUnmapper::queue_capacity() const {
+  return align_up<size_t>(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0, XGranuleSize);
+}
+
+bool XUnmapper::is_saturated() const {
+  return _enqueued_bytes >= queue_capacity();
+}
+
 void XUnmapper::do_unmap_and_destroy_page(XPage* page) const {
   EventZUnmap event;
   const size_t unmapped = page->size();
@@ -70,15 +110,9 @@ void XUnmapper::do_unmap_and_destroy_page(XPage* page) const {
 }
 
 void XUnmapper::unmap_and_destroy_page(XPage* page) {
-  // Asynchronous unmap and destroy is not supported with ZVerifyViews
-  if (ZVerifyViews) {
-    // Immediately unmap and destroy
+  if (!try_enqueue(page)) {
+    // Synchronously unmap and destroy
     do_unmap_and_destroy_page(page);
-  } else {
-    // Enqueue for asynchronous unmap and destroy
-    XLocker<XConditionLock> locker(&_lock);
-    _queue.insert_last(page);
-    _lock.notify_all();
   }
 }
 
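The core of the fix is a bounded queue with a synchronous fallback: unmap_and_destroy_page() first calls try_enqueue(), and only when the queue already holds queue_capacity() bytes does the caller unmap the page itself, which throttles the producer instead of letting unmapped-but-still-reserved address space pile up. The following standalone sketch is not the HotSpot code; BoundedUnmapper, Page and all the sizes are invented for illustration. It only shows the same shape of the pattern using std::thread and std::condition_variable:

// Standalone sketch of the pattern (illustrative only, not the HotSpot sources):
// a worker thread drains a queue of deferred unmap jobs, and producers fall back
// to doing the work synchronously once the enqueued bytes reach a fixed cap.
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

struct Page {
  size_t size;
};

class BoundedUnmapper {
public:
  BoundedUnmapper(size_t max_capacity, double limit_percent)
      : _capacity(static_cast<size_t>(max_capacity * limit_percent / 100.0)) {
    _worker = std::thread([this] { run(); });
  }

  ~BoundedUnmapper() {
    {
      std::lock_guard<std::mutex> guard(_lock);
      _stop = true;
    }
    _cv.notify_all();
    _worker.join();
  }

  // Mirrors unmap_and_destroy_page(): defer if possible, otherwise do it inline.
  void unmap_and_destroy(Page page) {
    if (!try_enqueue(page)) {
      do_unmap(page);  // Synchronous fallback applies back-pressure to the caller
    }
  }

private:
  bool try_enqueue(Page page) {
    std::lock_guard<std::mutex> guard(_lock);
    if (_enqueued_bytes >= _capacity) {
      // Saturated: the worker is lagging, refuse to let the queue grow further
      return false;
    }
    _queue.push_back(page);
    _enqueued_bytes += page.size;
    _cv.notify_all();
    return true;
  }

  void run() {
    std::unique_lock<std::mutex> guard(_lock);
    for (;;) {
      _cv.wait(guard, [this] { return _stop || !_queue.empty(); });
      if (_queue.empty()) {
        return;  // Only reached once _stop is set and the queue is drained
      }
      Page page = _queue.front();
      _queue.pop_front();
      _enqueued_bytes -= page.size;
      guard.unlock();
      do_unmap(page);  // Do the slow work outside the lock
      guard.lock();
    }
  }

  static void do_unmap(Page page) {
    std::printf("unmapping %zu bytes\n", page.size);
  }

  std::mutex _lock;
  std::condition_variable _cv;
  std::deque<Page> _queue;
  size_t _enqueued_bytes = 0;
  const size_t _capacity;
  bool _stop = false;
  std::thread _worker;
};

int main() {
  // 64M "max capacity" with a 25% limit: at most 16M may be queued at once.
  BoundedUnmapper unmapper(64 * 1024 * 1024, 25.0);
  for (int i = 0; i < 16; i++) {
    unmapper.unmap_and_destroy(Page{2 * 1024 * 1024});
  }
}

Doing the work synchronously in the caller, rather than blocking until the queue drains, keeps the back-pressure simple and guarantees forward progress even if the worker thread is stalled.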
@@ -36,9 +36,14 @@ private:
   XPageAllocator* const _page_allocator;
   XConditionLock        _lock;
   XList<XPage>          _queue;
+  size_t                _enqueued_bytes;
+  bool                  _warned_sync_unmapping;
   bool                  _stop;
 
   XPage* dequeue();
+  bool try_enqueue(XPage* page);
+  size_t queue_capacity() const;
+  bool is_saturated() const;
   void do_unmap_and_destroy_page(XPage* page) const;
 
 protected:
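In the header, _enqueued_bytes tracks how many bytes are currently sitting in the unmapper's queue and _warned_sync_unmapping ensures the "could not keep up" warning is only emitted once; the three new private methods are the enqueue path, the capacity computation and the saturation check used above.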
@@ -61,6 +61,13 @@
           "Uncommit memory if it has been unused for the specified " \
           "amount of time (in seconds)") \
 \
+  product(double, ZAsyncUnmappingLimit, 100.0, DIAGNOSTIC, \
+          "Specify the max amount (percentage of max heap size) of async " \
+          "unmapping that can be in-flight before unmapping requests are " \
+          "temporarily forced to be synchronous instead. " \
+          "The default means after an amount of pages proportional to the " \
+          "max capacity is enqueued, we resort to synchronous unmapping.") \
+ \
   product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \
           "Time between statistics print outs (in seconds)") \
           range(1, (uint)-1) \
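ZAsyncUnmappingLimit is a diagnostic flag, so changing it requires -XX:+UnlockDiagnosticVMOptions. queue_capacity() turns the percentage into a byte limit against the page allocator's max capacity: with the default of 100 and, for example, a 32G max heap, up to 32G of pages may sit in the asynchronous unmapping queue (aligned up to the granule size) before requests become synchronous, while -XX:ZAsyncUnmappingLimit=25 on the same heap would cap the queue at 8G. The heap size and the 25% setting here are only an illustration of the formula above.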
@@ -23,6 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/gc_globals.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
 #include "gc/z/zList.inline.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
@@ -35,6 +36,8 @@ ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator)
   : _page_allocator(page_allocator),
     _lock(),
     _queue(),
+    _enqueued_bytes(0),
+    _warned_sync_unmapping(false),
     _stop(false) {
   set_name("ZUnmapper");
   create_and_start();
@@ -50,6 +53,7 @@ ZPage* ZUnmapper::dequeue() {
 
     ZPage* const page = _queue.remove_first();
     if (page != nullptr) {
+      _enqueued_bytes -= page->size();
       return page;
     }
 
@@ -57,6 +61,37 @@ ZPage* ZUnmapper::dequeue() {
   }
 }
 
+bool ZUnmapper::try_enqueue(ZPage* page) {
+  // Enqueue for asynchronous unmap and destroy
+  ZLocker<ZConditionLock> locker(&_lock);
+  if (is_saturated()) {
+    // The unmapper thread is lagging behind and is unable to unmap memory fast enough
+    if (!_warned_sync_unmapping) {
+      _warned_sync_unmapping = true;
+      log_warning_p(gc)("WARNING: Encountered synchronous unmapping because asynchronous unmapping could not keep up");
+    }
+    log_debug(gc, unmap)("Synchronous unmapping " SIZE_FORMAT "M page", page->size() / M);
+    return false;
+  }
+
+  log_trace(gc, unmap)("Asynchronous unmapping " SIZE_FORMAT "M page (" SIZE_FORMAT "M / " SIZE_FORMAT "M enqueued)",
+                       page->size() / M, _enqueued_bytes / M, queue_capacity() / M);
+
+  _queue.insert_last(page);
+  _enqueued_bytes += page->size();
+  _lock.notify_all();
+
+  return true;
+}
+
+size_t ZUnmapper::queue_capacity() const {
+  return align_up<size_t>(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0, ZGranuleSize);
+}
+
+bool ZUnmapper::is_saturated() const {
+  return _enqueued_bytes >= queue_capacity();
+}
+
 void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
   EventZUnmap event;
   const size_t unmapped = page->size();
@@ -70,10 +105,10 @@ void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
 }
 
 void ZUnmapper::unmap_and_destroy_page(ZPage* page) {
-  // Enqueue for asynchronous unmap and destroy
-  ZLocker<ZConditionLock> locker(&_lock);
-  _queue.insert_last(page);
-  _lock.notify_all();
+  if (!try_enqueue(page)) {
+    // Synchronously unmap and destroy
+    do_unmap_and_destroy_page(page);
+  }
 }
 
 void ZUnmapper::run_thread() {
@@ -36,9 +36,14 @@ private:
   ZPageAllocator* const _page_allocator;
   ZConditionLock        _lock;
   ZList<ZPage>          _queue;
+  size_t                _enqueued_bytes;
+  bool                  _warned_sync_unmapping;
   bool                  _stop;
 
   ZPage* dequeue();
+  bool try_enqueue(ZPage* page);
+  size_t queue_capacity() const;
+  bool is_saturated() const;
   void do_unmap_and_destroy_page(ZPage* page) const;
 
 protected:
@@ -197,6 +197,7 @@ class outputStream;
   LOG_TAG(tlab) \
   LOG_TAG(tracking) \
   LOG_TAG(unload) /* Trace unloading of classes */ \
+  LOG_TAG(unmap) \
   LOG_TAG(unshareable) \
   NOT_PRODUCT(LOG_TAG(upcall)) \
   LOG_TAG(update) \
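The new unmap log tag is what makes the gc+unmap messages above selectable: running with -Xlog:gc+unmap=debug should show the per-page "Synchronous unmapping ..." lines, and -Xlog:gc+unmap=trace additionally the "Asynchronous unmapping ..." lines, matching the log_debug(gc, unmap) and log_trace(gc, unmap) calls introduced in the unmapper.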