8246220: ZGC: Introduce ZUnmapper to asynchronously unmap pages

Reviewed-by: eosterlund, stefank
This commit is contained in:
Per Lidén 2020-06-09 11:01:09 +02:00
parent d7e68f375c
commit 9d0ba7ae7f
12 changed files with 372 additions and 42 deletions

View File

@ -37,6 +37,7 @@
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUncommitter.hpp"
#include "gc/z/zUnmapper.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
@ -149,6 +150,7 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers,
_reclaimed(0),
_stalled(),
_satisfied(),
_unmapper(new ZUnmapper(this)),
_uncommitter(new ZUncommitter(this)),
_safe_delete(),
_initialized(false) {
@ -381,12 +383,12 @@ void ZPageAllocator::uncommit_page(ZPage* page) {
void ZPageAllocator::map_page(const ZPage* page) const {
// Map physical memory
_physical.map(page->physical_memory(), page->start());
_physical.map(page->start(), page->physical_memory());
}
void ZPageAllocator::unmap_page(const ZPage* page) const {
// Unmap physical memory
_physical.unmap(page->physical_memory(), page->start());
_physical.unmap(page->start(), page->size());
}
void ZPageAllocator::destroy_page(ZPage* page) {
@ -550,6 +552,8 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
// Allocate virtual memory. To make error handling a lot more straight
// forward, we allocate virtual memory before destroying flushed pages.
// Flushed pages are also unmapped and destroyed asynchronously, so we
// can't immediately reuse that part of the address space anyway.
const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
if (vmem.is_null()) {
log_error(gc)("Out of address space");
@ -564,14 +568,13 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
for (ZPage* page; iter.next(&page);) {
flushed += page->size();
unmap_page(page);
// Harvest flushed physical memory
ZPhysicalMemory& fmem = page->physical_memory();
pmem.add_segments(fmem);
fmem.remove_segments();
destroy_page(page);
// Unmap and destroy page
_unmapper->unmap_and_destroy_page(page);
}
if (flushed > 0) {
@ -811,21 +814,21 @@ void ZPageAllocator::disable_deferred_delete() const {
void ZPageAllocator::debug_map_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
_physical.debug_map(page->physical_memory(), page->start());
_physical.debug_map(page->start(), page->physical_memory());
}
void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
_physical.debug_unmap(page->physical_memory(), page->start());
_physical.debug_unmap(page->start(), page->size());
}
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZListIterator<ZPageAllocation> iter(&_satisfied);
for (ZPageAllocation* allocation; iter.next(&allocation);) {
ZListIterator<ZPage> iter(allocation->pages());
for (ZPage* page; iter.next(&page);) {
ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
ZListIterator<ZPage> iter_pages(allocation->pages());
for (ZPage* page; iter_pages.next(&page);) {
cl->do_page(page);
}
}
@ -858,5 +861,6 @@ void ZPageAllocator::check_out_of_memory() {
}
void ZPageAllocator::threads_do(ThreadClosure* tc) const {
tc->do_thread(_unmapper);
tc->do_thread(_uncommitter);
}

View File

@ -36,9 +36,11 @@ class ThreadClosure;
class ZPageAllocation;
class ZWorkers;
class ZUncommitter;
class ZUnmapper;
class ZPageAllocator {
friend class VMStructs;
friend class ZUnmapper;
friend class ZUncommitter;
private:
@ -59,6 +61,7 @@ private:
ssize_t _reclaimed;
ZList<ZPageAllocation> _stalled;
ZList<ZPageAllocation> _satisfied;
ZUnmapper* _unmapper;
ZUncommitter* _uncommitter;
mutable ZSafeDelete<ZPage> _safe_delete;
bool _initialized;

View File

@ -319,17 +319,15 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
// From an NMT point of view we treat the first heap view (marked0) as committed
const uintptr_t addr = ZAddress::marked0(offset);
const size_t size = pmem.size();
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
if (MemTracker::tracking_level() > NMT_minimal) {
const uintptr_t addr = ZAddress::marked0(offset);
const size_t size = pmem.size();
Tracker tracker(Tracker::uncommit);
tracker.record((address)addr, size);
}
@ -403,7 +401,7 @@ void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
size_t size = 0;
// Map segments
@ -422,8 +420,8 @@ void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t add
}
}
void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
_backing.unmap(addr, pmem.size());
void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
_backing.unmap(addr, size);
}
void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
@ -438,42 +436,44 @@ void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
}
}
void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
const size_t size = pmem.size();
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset));
map_view(ZAddress::good(offset), pmem);
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset));
map_view(pmem, ZAddress::marked1(offset));
map_view(pmem, ZAddress::remapped(offset));
map_view(ZAddress::marked0(offset), pmem);
map_view(ZAddress::marked1(offset), pmem);
map_view(ZAddress::remapped(offset), pmem);
}
nmt_commit(pmem, offset);
nmt_commit(offset, size);
}
void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
nmt_uncommit(pmem, offset);
void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
nmt_uncommit(offset, size);
if (ZVerifyViews) {
// Unmap good view
unmap_view(pmem, ZAddress::good(offset));
unmap_view(ZAddress::good(offset), size);
} else {
// Unmap all views
unmap_view(pmem, ZAddress::marked0(offset));
unmap_view(pmem, ZAddress::marked1(offset));
unmap_view(pmem, ZAddress::remapped(offset));
unmap_view(ZAddress::marked0(offset), size);
unmap_view(ZAddress::marked1(offset), size);
unmap_view(ZAddress::remapped(offset), size);
}
}
void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset));
map_view(ZAddress::good(offset), pmem);
}
void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
unmap_view(pmem, ZAddress::good(offset));
unmap_view(ZAddress::good(offset), size);
}

View File

@ -85,12 +85,12 @@ private:
ZPhysicalMemoryBacking _backing;
ZMemoryManager _manager;
void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void nmt_commit(uintptr_t offset, size_t size) const;
void nmt_uncommit(uintptr_t offset, size_t size) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
void map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const;
void unmap_view(uintptr_t addr, size_t size) const;
public:
ZPhysicalMemoryManager(size_t max_capacity);
@ -108,11 +108,11 @@ public:
void pretouch(uintptr_t offset, size_t size) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
void unmap(uintptr_t offset, size_t size) const;
void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
void debug_unmap(uintptr_t offset, size_t size) const;
};
#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zUnmapper.hpp"
#include "jfr/jfrEvents.hpp"
#include "runtime/globals.hpp"
// Construct the unmapper service thread. Pages are handed over via
// unmap_and_destroy_page() and processed asynchronously by run_service().
ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator) :
_page_allocator(page_allocator),
_lock(),
_queue(),
_stop(false) {
set_name("ZUnmapper");
// Start the thread immediately; run_service() blocks in dequeue()
// until pages are enqueued or stop_service() is called
create_and_start();
}
// Remove and return the first page in the queue, blocking on the condition
// lock while the queue is empty. Returns NULL only when a stop has been
// requested via stop_service().
ZPage* ZUnmapper::dequeue() {
ZLocker<ZConditionLock> locker(&_lock);
for (;;) {
// Check the stop flag first, so a pending stop takes precedence
// over any pages still in the queue
if (_stop) {
return NULL;
}
ZPage* const page = _queue.remove_first();
if (page != NULL) {
return page;
}
// Queue empty; wait to be notified by unmap_and_destroy_page()
// or stop_service()
_lock.wait();
}
}
// Synchronously unmap the page's memory and destroy the page object,
// then post a JFR ZUnmap event carrying the number of bytes unmapped.
void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
EventZUnmap event;

// Record the size up front, since the page object is destroyed below
const size_t size_in_bytes = page->size();

// Unmap the page's memory and reclaim the page object
_page_allocator->unmap_page(page);
_page_allocator->destroy_page(page);

// Post JFR event
event.commit(size_in_bytes);
}
// Hand a page over for unmap and destroy. Normally the page is enqueued
// and processed asynchronously by the unmapper thread; with ZVerifyViews
// it is processed immediately, since asynchronous unmap and destroy is
// not supported in that mode.
void ZUnmapper::unmap_and_destroy_page(ZPage* page) {
if (ZVerifyViews) {
// Asynchronous processing not supported; unmap and destroy immediately
do_unmap_and_destroy_page(page);
return;
}

// Enqueue the page and wake up the unmapper thread
ZLocker<ZConditionLock> locker(&_lock);
_queue.insert_last(page);
_lock.notify_all();
}
void ZUnmapper::run_service() {
for (;;) {
ZPage* const page = dequeue();
if (page == NULL) {
// Stop
return;
}
do_unmap_and_destroy_page(page);
}
}
// Request the service thread to terminate. Sets the stop flag under the
// lock and wakes any thread blocked in dequeue(), which then returns NULL.
void ZUnmapper::stop_service() {
ZLocker<ZConditionLock> locker(&_lock);
_stop = true;
_lock.notify_all();
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZUNMAPPER_HPP
#define SHARE_GC_Z_ZUNMAPPER_HPP
#include "gc/z/zList.hpp"
#include "gc/z/zLock.hpp"
#include "gc/shared/concurrentGCThread.hpp"
class ZPage;
class ZPageAllocator;
// Concurrent GC thread that unmaps and destroys ZPages handed to it by
// the ZPageAllocator, so that unmapping happens asynchronously.
class ZUnmapper : public ConcurrentGCThread {
private:
ZPageAllocator* const _page_allocator; // Allocator used to unmap/destroy pages
ZConditionLock _lock;                  // Protects _queue and _stop
ZList<ZPage> _queue;                   // Pages pending unmap and destroy
bool _stop;                            // Set by stop_service()
// Blocking removal from _queue; returns NULL when stopping
ZPage* dequeue();
// Synchronously unmap and destroy a single page, posting a JFR event
void do_unmap_and_destroy_page(ZPage* page) const;
protected:
virtual void run_service();
virtual void stop_service();
public:
ZUnmapper(ZPageAllocator* page_allocator);
// Enqueue a page for asynchronous unmap and destroy (immediate with
// ZVerifyViews)
void unmap_and_destroy_page(ZPage* page);
};
#endif // SHARE_GC_Z_ZUNMAPPER_HPP

View File

@ -1045,6 +1045,10 @@
<Field type="ulong" contentType="bytes" name="uncommitted" label="Uncommitted" />
</Event>
<Event name="ZUnmap" category="Java Virtual Machine, GC, Detailed" label="ZGC Unmap" description="Unmapping of memory" thread="true">
<Field type="ulong" contentType="bytes" name="unmapped" label="Unmapped" />
</Event>
<Event name="ShenandoahHeapRegionStateChange" category="Java Virtual Machine, GC, Detailed" label="Shenandoah Heap Region State Change" description="Information about a Shenandoah heap region state change"
startTime="false">
<Field type="uint" name="index" label="Index" />

View File

@ -751,6 +751,11 @@
<setting name="threshold">0 ms</setting>
</event>
<event name="jdk.ZUnmap">
<setting name="enabled">true</setting>
<setting name="threshold">0 ms</setting>
</event>
<event name="jdk.Deoptimization">
<setting name="enabled">true</setting>
<setting name="stackTrace">false</setting>

View File

@ -751,6 +751,11 @@
<setting name="threshold">0 ms</setting>
</event>
<event name="jdk.ZUnmap">
<setting name="enabled">true</setting>
<setting name="threshold">0 ms</setting>
</event>
<event name="jdk.Deoptimization">
<setting name="enabled">true</setting>
<setting name="stackTrace">true</setting>

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package gc.z;
/*
* @test TestPageCacheFlush
* @requires vm.gc.Z & !vm.graal.enabled
* @summary Test ZGC page cache flushing
* @library /test/lib
* @run driver gc.z.TestPageCacheFlush
*/
import java.util.LinkedList;
import jdk.test.lib.process.ProcessTools;
/**
 * Driver test that provokes ZGC page cache flushing: the page cache is
 * first filled with small pages, then large allocations force cached
 * small pages to be flushed and rebuilt into large pages. The test
 * passes if the child VM's GC log contains "Page Cache Flushed:".
 */
public class TestPageCacheFlush {
    static class Test {
        private static final int K = 1024;
        private static final int M = K * K;

        // Keeps allocated arrays strongly reachable until OOME is provoked
        private static volatile LinkedList<byte[]> keepAlive;

        public static void fillPageCache(int size) {
            System.out.println("Begin allocate (" + size + ")");
            keepAlive = new LinkedList<>();
            try {
                while (true) {
                    keepAlive.add(new byte[size]);
                }
            } catch (OutOfMemoryError oome) {
                // Drop all references and let the GC reclaim them,
                // repopulating the page cache
                keepAlive = null;
                System.gc();
            }
            System.out.println("End allocate (" + size + ")");
        }

        public static void main(String[] args) throws Exception {
            // Fill the page cache with small pages
            fillPageCache(10 * K);

            // Force large allocations, provoking the page cache to flush
            // and rebuild cached small pages into large pages
            fillPageCache(10 * M);
        }
    }

    public static void main(String[] args) throws Exception {
        ProcessTools.executeProcess(ProcessTools.createJavaProcessBuilder(
                "-XX:+UseZGC",
                "-Xms128M",
                "-Xmx128M",
                "-Xlog:gc,gc+init,gc+heap=debug",
                Test.class.getName()))
                .outputTo(System.out)
                .errorTo(System.out)
                .shouldContain("Page Cache Flushed:")
                .shouldHaveExitValue(0);
    }
}

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jfr.event.gc.detailed;
import java.util.List;
import static gc.testlibrary.Allocation.blackHole;
import jdk.jfr.Recording;
import jdk.jfr.consumer.RecordedEvent;
import jdk.test.lib.jfr.EventNames;
import jdk.test.lib.jfr.Events;
/**
* @test TestZUnmapEvent
* @requires vm.hasJFR & vm.gc.Z
* @key jfr
* @library /test/lib /test/jdk /test/hotspot/jtreg
* @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZUnmapEvent
*/
/**
 * Verifies that JFR ZUnmap events are emitted when the ZGC page cache
 * is flushed: non-large allocations fill the cache, large allocations
 * then provoke flushing and unmapping of cached pages.
 */
public class TestZUnmapEvent {
    public static void main(String[] args) throws Exception {
        try (Recording recording = new Recording()) {
            // Enable the event we are interested in and start recording
            recording.enable(EventNames.ZUnmap);
            recording.start();

            // Fill the page cache with non-large pages
            for (int allocation = 0; allocation < 128; allocation++) {
                blackHole(new byte[256 * 1024]);
            }

            // Provoke page cache flushing and unmapping with large objects
            for (int allocation = 0; allocation < 10; allocation++) {
                blackHole(new byte[7 * 1024 * 1024]);
            }

            // Give the asynchronous unmapping a chance to happen
            Thread.sleep(10 * 1000);

            recording.stop();

            // Verify that at least one event was recorded
            List<RecordedEvent> events = Events.fromRecording(recording);
            System.out.println("Events: " + events.size());
            Events.hasEvents(events);
        }
    }
}

View File

@ -147,6 +147,7 @@ public class EventNames {
public final static String ZRelocationSet = PREFIX + "ZRelocationSet";
public final static String ZRelocationSetGroup = PREFIX + "ZRelocationSetGroup";
public final static String ZUncommit = PREFIX + "ZUncommit";
public final static String ZUnmap = PREFIX + "ZUnmap";
// Compiler
public final static String Compilation = PREFIX + "Compilation";