8222480: Implementation: JEP 351: ZGC: Uncommit Unused Memory
Reviewed-by: stefank, eosterlund
parent 4bde1f6471
commit 44e7959bb8
Changed paths:

src/hotspot/os/linux/gc/z
src/hotspot/os_cpu/linux_x86/gc/z
    zBackingFile_linux_x86.cpp
    zBackingFile_linux_x86.hpp
    zBackingPath_linux_x86.cpp
    zPhysicalMemoryBacking_linux_x86.cpp
    zPhysicalMemoryBacking_linux_x86.hpp
src/hotspot/share/gc/z
    vmStructs_z.hpp
    zCollectedHeap.cpp
    zCollectedHeap.hpp
    zHeap.cpp
    zHeap.hpp
    zList.hpp
    zList.inline.hpp
    zLiveMap.cpp
    zLiveMap.hpp
    zLiveMap.inline.hpp
    zMemory.cpp
    zMemory.hpp
    zPage.cpp
    zPage.hpp
    zPage.inline.hpp
    zPageAllocator.cpp
    zPageAllocator.hpp
    zPageCache.cpp
    zPageCache.hpp
    zPageCache.inline.hpp
    zPhysicalMemory.cpp
    zPhysicalMemory.hpp
    zPhysicalMemory.inline.hpp
    zPreMappedMemory.cpp
    zPreMappedMemory.hpp
    zUncommitter.cpp
    zUncommitter.hpp
    zVirtualMemory.cpp
    zVirtualMemory.hpp
    zVirtualMemory.inline.hpp
    z_globals.hpp
jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z
test/hotspot
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 #endif

 static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
-  return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+  return syscall(SYS_get_mempolicy, mode, nmask, maxnode, addr, flags);
 }

 void ZNUMA::initialize_platform() {
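The hunk above switches from raw __NR_* numbers to the SYS_* constants from <sys/syscall.h>. A minimal, stand-alone sketch of the same wrapper pattern; the fallback number is the x86_64 value and is shown purely for illustration:

#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

// Define the syscall number ourselves only if libc doesn't provide it
#ifndef SYS_get_mempolicy
#define SYS_get_mempolicy 239 // x86_64; illustrative fallback
#endif

static int z_get_mempolicy(uint32_t* mode, const unsigned long* nmask,
                           unsigned long maxnode, uintptr_t addr, int flags) {
  // syscall(2) returns -1 and sets errno on failure
  return syscall(SYS_get_mempolicy, mode, nmask, maxnode, addr, flags);
}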
@@ -26,8 +26,10 @@
 #include "gc/z/zBackingFile_linux_x86.hpp"
 #include "gc/z/zBackingPath_linux_x86.hpp"
 #include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
 #include "gc/z/zLargePages.inline.hpp"
 #include "logging/log.hpp"
+#include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
@@ -36,9 +38,54 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/statfs.h>
+#include <sys/syscall.h>
 #include <sys/types.h>
 #include <unistd.h>

+//
+// Support for building on older Linux systems
+//
+
+// System calls
+#ifndef SYS_fallocate
+#define SYS_fallocate                    285
+#endif
+#ifndef SYS_memfd_create
+#define SYS_memfd_create                 319
+#endif
+
+// memfd_create(2) flags
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+
+// open(2) flags
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// fallocate(2) flags
+#ifndef FALLOC_FL_KEEP_SIZE
+#define FALLOC_FL_KEEP_SIZE              0x01
+#endif
+#ifndef FALLOC_FL_PUNCH_HOLE
+#define FALLOC_FL_PUNCH_HOLE             0x02
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
+#endif
+
+// Filesystem names
+#define ZFILESYSTEM_TMPFS                "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
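For context, a stand-alone sketch of how a tmpfs-backed heap file is obtained through the memfd_create(2) syscall that these defines enable on older toolchains (error handling trimmed; the name string is only a debugging label):

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>

#ifndef SYS_memfd_create
#define SYS_memfd_create 319 // x86_64
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif

int main() {
  // Creates an unlinked, volatile tmpfs file; the fd supports
  // ftruncate(2), fallocate(2) and mmap(2) like a regular file.
  const int fd = syscall(SYS_memfd_create, "java_heap", MFD_CLOEXEC);
  if (fd == -1) {
    perror("memfd_create");
    return 1;
  }
  printf("memfd fd = %d\n", fd);
  return 0;
}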
@@ -49,31 +96,6 @@
 // Java heap filename
 #define ZFILENAME_HEAP "java_heap"

-// Support for building on older Linux systems
-#ifndef __NR_memfd_create
-#define __NR_memfd_create 319
-#endif
-#ifndef MFD_CLOEXEC
-#define MFD_CLOEXEC 0x0001U
-#endif
-#ifndef MFD_HUGETLB
-#define MFD_HUGETLB 0x0004U
-#endif
-#ifndef O_CLOEXEC
-#define O_CLOEXEC 02000000
-#endif
-#ifndef O_TMPFILE
-#define O_TMPFILE (020000000 | O_DIRECTORY)
-#endif
-
-// Filesystem types, see statfs(2)
-#ifndef TMPFS_MAGIC
-#define TMPFS_MAGIC 0x01021994
-#endif
-#ifndef HUGETLBFS_MAGIC
-#define HUGETLBFS_MAGIC 0x958458f6
-#endif
-
 // Preferred tmpfs mount points, ordered by priority
 static const char* z_preferred_tmpfs_mountpoints[] = {
   "/dev/shm",
@@ -88,15 +110,22 @@ static const char* z_preferred_hugetlbfs_mountpoints[] = {
   NULL
 };

-static int z_memfd_create(const char *name, unsigned int flags) {
-  return syscall(__NR_memfd_create, name, flags);
+static int z_fallocate_hugetlbfs_attempts = 3;
+static bool z_fallocate_supported = true;
+
+static int z_fallocate(int fd, int mode, size_t offset, size_t length) {
+  return syscall(SYS_fallocate, fd, mode, offset, length);
 }

-bool ZBackingFile::_hugetlbfs_mmap_retry = true;
+static int z_memfd_create(const char *name, unsigned int flags) {
+  return syscall(SYS_memfd_create, name, flags);
+}

 ZBackingFile::ZBackingFile() :
     _fd(-1),
+    _size(0),
     _filesystem(0),
+    _block_size(0),
     _available(0),
     _initialized(false) {
@@ -107,46 +136,53 @@ ZBackingFile::ZBackingFile() :
   }

   // Get filesystem statistics
-  struct statfs statfs_buf;
-  if (fstatfs(_fd, &statfs_buf) == -1) {
+  struct statfs buf;
+  if (fstatfs(_fd, &buf) == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)",
-                        err.to_string());
+    log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
     return;
   }

-  _filesystem = statfs_buf.f_type;
-  _available = statfs_buf.f_bavail * statfs_buf.f_bsize;
+  _filesystem = buf.f_type;
+  _block_size = buf.f_bsize;
+  _available = buf.f_bavail * _block_size;

   // Make sure we're on a supported filesystem
   if (!is_tmpfs() && !is_hugetlbfs()) {
-    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem",
-                        ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("Backing file must be located on a %s or a %s filesystem",
+                  ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
     return;
   }

   // Make sure the filesystem type matches requested large page type
   if (ZLargePages::is_transparent() && !is_tmpfs()) {
-    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
-                        ZFILESYSTEM_TMPFS);
+    log_error(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
+                  ZFILESYSTEM_TMPFS);
     return;
   }

   if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
-    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
-                        ZFILESYSTEM_TMPFS);
+    log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
+                  ZFILESYSTEM_TMPFS);
     return;
   }

   if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
-    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem",
-                        ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
+                  "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
     return;
   }

   if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
-    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem",
-                        ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
+                  ZFILESYSTEM_HUGETLBFS);
     return;
   }

+  const size_t expected_block_size = is_tmpfs() ? os::vm_page_size() : os::large_page_size();
+  if (expected_block_size != _block_size) {
+    log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
+                  is_tmpfs() ? ZFILESYSTEM_TMPFS : ZFILESYSTEM_HUGETLBFS, _block_size, expected_block_size);
+    return;
+  }
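A self-contained sketch of the fstatfs(2) probe the constructor now performs; the TMPFS_MAGIC fallback mirrors the defines above, and error handling is trimmed:

#include <sys/statfs.h>
#include <cstdint>
#include <cstddef>

#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif

// Returns the filesystem block size, or 0 on error, and reports
// whether the descriptor lives on tmpfs via *on_tmpfs.
static size_t probe_filesystem(int fd, bool* on_tmpfs) {
  struct statfs buf;
  if (fstatfs(fd, &buf) == -1) {
    return 0;
  }
  *on_tmpfs = (uint32_t)buf.f_type == TMPFS_MAGIC;
  return (size_t)buf.f_bsize;
}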
@@ -165,7 +201,7 @@ int ZBackingFile::create_mem_fd(const char* name) const {
   if (fd == -1) {
     ZErrno err;
     log_debug(gc, init)("Failed to create memfd file (%s)",
-                        ((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+                        ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
     return -1;
   }
@@ -185,7 +221,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
   // Find mountpoint
   ZBackingPath path(filesystem, preferred_mountpoints);
   if (path.get() == NULL) {
-    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    log_error(gc)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
     return -1;
   }
@@ -201,7 +237,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
     struct stat stat_buf;
     if (fstat(fd_anon, &stat_buf) == -1) {
       ZErrno err;
-      log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+      log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
       return -1;
     }
@@ -220,14 +256,14 @@ int ZBackingFile::create_file_fd(const char* name) const {
   const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
   if (fd == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+    log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
     return -1;
   }

   // Unlink file
   if (unlink(filename) == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+    log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
     return -1;
   }
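The create-then-unlink fallback above yields an anonymous file on kernels without O_TMPFILE. A minimal stand-alone sketch; directory and file name are illustrative:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

static int create_anonymous_file(const char* dir) {
  char filename[256];
  snprintf(filename, sizeof(filename), "%s/java_heap.%d", dir, (int)getpid());
  const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
  if (fd == -1) {
    return -1;
  }
  // Unlink immediately; the inode stays alive until fd is closed,
  // so the file is effectively anonymous from here on.
  unlink(filename);
  return fd;
}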
@@ -262,6 +298,10 @@ int ZBackingFile::fd() const {
   return _fd;
 }

+size_t ZBackingFile::size() const {
+  return _size;
+}
+
 size_t ZBackingFile::available() const {
   return _available;
 }
@@ -280,147 +320,271 @@ bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
   return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
 }

-bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
-  // Try first smaller part.
-  const size_t offset0 = offset;
-  const size_t length0 = align_up(length / 2, alignment);
-  if (!try_expand_tmpfs(offset0, length0, alignment)) {
-    return false;
+ZErrno ZBackingFile::fallocate_compat_ftruncate(size_t size) const {
+  while (ftruncate(_fd, size) == -1) {
+    if (errno != EINTR) {
+      // Failed
+      return errno;
+    }
   }

-  // Try second smaller part.
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
+  // On hugetlbfs, mapping a file segment will fail immediately, without
+  // the need to touch the mapped pages first, if there aren't enough huge
+  // pages available to back the mapping.
+  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+  if (addr == MAP_FAILED) {
+    // Failed
+    return errno;
+  }
+
+  // Once mapped, the huge pages are only reserved. We need to touch them
+  // to associate them with the file segment. Note that we can not punch
+  // holes in file segments which only have reserved pages.
+  if (touch) {
+    char* const start = (char*)addr;
+    char* const end = start + length;
+    os::pretouch_memory(start, end, _block_size);
+  }
+
+  // Unmap again. From now on, the huge pages that were mapped are allocated
+  // to this file. There's no risk in getting SIGBUS when touching them.
+  if (munmap(addr, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_compat_pwrite(size_t offset, size_t length) const {
+  uint8_t data = 0;
+
+  // Allocate backing memory by writing to each block
+  for (size_t pos = offset; pos < offset + length; pos += _block_size) {
+    if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
+      // Failed
+      return errno;
+    }
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_fill_hole_compat(size_t offset, size_t length) {
+  // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
+  // since Linux 4.3. When fallocate(2) is not supported we emulate it using
+  // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Increase file size
+    const ZErrno err = fallocate_compat_ftruncate(end);
+    if (err) {
+      // Failed
+      return err;
+    }
+  }
+
+  // Allocate backing memory
+  const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
+                                    : fallocate_compat_pwrite(offset, length);
+  if (err) {
+    if (end > _size) {
+      // Restore file size
+      fallocate_compat_ftruncate(_size);
+    }
+
+    // Failed
+    return err;
+  }
+
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
+}
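A stand-alone sketch of the tmpfs emulation path above: grow the file with ftruncate(2), then touch one byte per block with pwrite(2) so the kernel actually allocates backing pages. block_size and file_size stand in for the values the class tracks; error handling is minimal:

#include <sys/types.h>
#include <unistd.h>
#include <cerrno>
#include <cstdint>

static int fill_hole_compat(int fd, size_t offset, size_t length,
                            size_t block_size, size_t file_size) {
  // Grow the file if the hole extends past the current end
  if (offset + length > file_size) {
    while (ftruncate(fd, offset + length) == -1) {
      if (errno != EINTR) {
        return errno;
      }
    }
  }

  // Touch one byte per block to force page allocation
  const uint8_t data = 0;
  for (size_t pos = offset; pos < offset + length; pos += block_size) {
    if (pwrite(fd, &data, sizeof(data), (off_t)pos) == -1) {
      return errno;
    }
  }
  return 0;
}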
+ZErrno ZBackingFile::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+  const int mode = 0; // Allocate
+  const int res = z_fallocate(_fd, mode, offset, length);
+  if (res == -1) {
+    // Failed
+    return errno;
+  }
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_fill_hole(size_t offset, size_t length) {
+  // Using compat mode is more efficient when allocating space on hugetlbfs.
+  // Note that allocating huge pages this way will only reserve them, and not
+  // associate them with segments of the file. We must guarantee that we at
+  // some point touch these segments, otherwise we can not punch holes in them.
+  if (z_fallocate_supported && !is_hugetlbfs()) {
+    const ZErrno err = fallocate_fill_hole_syscall(offset, length);
+    if (!err) {
+      // Success
+      return 0;
+    }
+
+    if (err != ENOSYS && err != EOPNOTSUPP) {
+      // Failed
+      return err;
+    }
+
+    // Not supported
+    log_debug(gc)("Falling back to fallocate() compatibility mode");
+    z_fallocate_supported = false;
+  }
+
+  return fallocate_fill_hole_compat(offset, length);
+}
+
+ZErrno ZBackingFile::fallocate_punch_hole(size_t offset, size_t length) {
+  if (is_hugetlbfs()) {
+    // We can only punch holes in pages that have been touched. Non-touched
+    // pages are only reserved, and not associated with any specific file
+    // segment. We don't know which pages have been previously touched, so
+    // we always touch them here to guarantee that we can punch holes.
+    const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
+    if (err) {
+      // Failed
+      return err;
+    }
+  }
+
+  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
+  if (z_fallocate(_fd, mode, offset, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+  // Try first half
+  const size_t offset0 = offset;
+  const size_t length0 = align_up(length / 2, _block_size);
+  const ZErrno err0 = fallocate(punch_hole, offset0, length0);
+  if (err0) {
+    return err0;
+  }

+  // Try second half
   const size_t offset1 = offset0 + length0;
   const size_t length1 = length - length0;
-  if (!try_expand_tmpfs(offset1, length1, alignment)) {
-    return false;
+  const ZErrno err1 = fallocate(punch_hole, offset1, length1);
+  if (err1) {
+    return err1;
   }

-  return true;
+  // Success
+  return 0;
 }

-bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
-  assert(length > 0, "Invalid length");
-  assert(is_aligned(length, alignment), "Invalid length");
+ZErrno ZBackingFile::fallocate(bool punch_hole, size_t offset, size_t length) {
+  assert(is_aligned(offset, _block_size), "Invalid offset");
+  assert(is_aligned(length, _block_size), "Invalid length");

-  ZErrno err = posix_fallocate(_fd, offset, length);
-
-  if (err == EINTR && length > alignment) {
-    // Calling posix_fallocate() with a large length can take a long
-    // time to complete. When running profilers, such as VTune, this
-    // syscall will be constantly interrupted by signals. Expanding
-    // the file in smaller steps avoids this problem.
-    return try_split_and_expand_tmpfs(offset, length, alignment);
+  const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
+  if (err == EINTR && length > _block_size) {
+    // Calling fallocate(2) with a large length can take a long time to
+    // complete. When running profilers, such as VTune, this syscall will
+    // be constantly interrupted by signals. Expanding the file in smaller
+    // steps avoids this problem.
+    return split_and_fallocate(punch_hole, offset, length);
   }

   return err;
 }
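The halving strategy in fallocate() above, shown in isolation against plain posix_fallocate(3), which the pre-JEP code used; when a long call keeps getting interrupted, splitting the range keeps each individual syscall short:

#include <fcntl.h>
#include <cerrno>
#include <cstddef>

static int fallocate_splitting(int fd, size_t offset, size_t length, size_t align) {
  // posix_fallocate(3) returns the error number directly (0 on success)
  const int err = posix_fallocate(fd, (off_t)offset, (off_t)length);
  if (err != EINTR || length <= align) {
    return err;
  }
  // Interrupted on a large range: split at an aligned midpoint and retry
  const size_t half = ((length / 2 + align - 1) / align) * align;
  const int err0 = fallocate_splitting(fd, offset, half, align);
  return (err0 != 0) ? err0 : fallocate_splitting(fd, offset + half, length - half, align);
}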
+bool ZBackingFile::commit_inner(size_t offset, size_t length) {
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+retry:
+  const ZErrno err = fallocate(false /* punch_hole */, offset, length);
   if (err) {
-    log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+    if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
+      // If we fail to allocate during initialization, due to lack of space on
+      // the hugetlbfs filesystem, then we wait and retry a few times before
+      // giving up. Otherwise there is a risk that running JVMs back-to-back
+      // will fail, since there is a delay between process termination and the
+      // huge pages owned by that process being returned to the huge page pool
+      // and made available for new allocations.
+      log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
+
+      // Wait and retry in one second, in the hope that huge pages will be
+      // available by then.
+      sleep(1);
+      goto retry;
+    }
+
+    // Failed
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
     return false;
   }

+  // Success
   return true;
 }

-bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length) const {
-  assert(is_tmpfs(), "Wrong filesystem");
-  return try_expand_tmpfs(offset, length, os::vm_page_size());
-}
-
-bool ZBackingFile::try_expand_hugetlbfs(size_t offset, size_t length) const {
-  assert(is_hugetlbfs(), "Wrong filesystem");
-
-  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
-  // Instead of posix_fallocate() we can use a well-known workaround,
-  // which involves truncating the file to requested size and then try
-  // to map it to verify that there are enough huge pages available to
-  // back it.
-  while (ftruncate(_fd, offset + length) == -1) {
-    ZErrno err;
-    if (err != EINTR) {
-      log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
-      return false;
-    }
-  }
-
-  // If we fail mapping during initialization, i.e. when we are pre-mapping
-  // the heap, then we wait and retry a few times before giving up. Otherwise
-  // there is a risk that running JVMs back-to-back will fail, since there
-  // is a delay between process termination and the huge pages owned by that
-  // process being returned to the huge page pool and made available for new
-  // allocations.
-  void* addr = MAP_FAILED;
-  const int max_attempts = 5;
-  for (int attempt = 1; attempt <= max_attempts; attempt++) {
-    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
-    if (addr != MAP_FAILED || !_hugetlbfs_mmap_retry) {
-      // Mapping was successful or mmap retry is disabled
-      break;
-    }
-
-    ZErrno err;
-    log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
-                  err.to_string(), attempt, max_attempts);
-
-    // Wait and retry in one second, in the hope that
-    // huge pages will be available by then.
-    sleep(1);
-  }
-
-  // Disable mmap retry from now on
-  if (_hugetlbfs_mmap_retry) {
-    _hugetlbfs_mmap_retry = false;
-  }
-
-  if (addr == MAP_FAILED) {
-    // Not enough huge pages left
-    ZErrno err;
-    log_error(gc)("Failed to map backing file (%s)", err.to_string());
-    return false;
-  }
-
-  // Successful mapping, unmap again. From now on the pages we mapped
-  // will be reserved for this file.
-  if (munmap(addr, length) == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
-    return false;
-  }
-
-  return true;
-}
-
-bool ZBackingFile::try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const {
-  assert(is_aligned(offset, alignment), "Invalid offset");
-  assert(is_aligned(length, alignment), "Invalid length");
-
-  log_debug(gc)("Expanding heap from " SIZE_FORMAT "M to " SIZE_FORMAT "M", offset / M, (offset + length) / M);
-
-  return is_hugetlbfs() ? try_expand_hugetlbfs(offset, length) : try_expand_tmpfs(offset, length);
-}
-
-size_t ZBackingFile::try_expand(size_t offset, size_t length, size_t alignment) const {
+size_t ZBackingFile::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
+  }
+
+  // Failed, try to commit as much as possible
   size_t start = offset;
   size_t end = offset + length;

-  // Try to expand
-  if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
-    // Success
-    return end;
-  }
-
-  // Failed, try to expand as much as possible
   for (;;) {
-    length = align_down((end - start) / 2, alignment);
-    if (length < alignment) {
-      // Done, don't expand more
-      return start;
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length < ZGranuleSize) {
+      // Done, don't commit more
+      return start - offset;
     }

-    if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
-      // Success, try expand more
+    if (commit_inner(start, length)) {
+      // Success, try commit more
       start += length;
     } else {
-      // Failed, try expand less
+      // Failed, try commit less
      end -= length;
    }
  }
 }

+size_t ZBackingFile::uncommit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const ZErrno err = fallocate(true /* punch_hole */, offset, length);
+  if (err) {
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
+}
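The new uncommit path boils down to a single fallocate(2) call. A stand-alone sketch, using the glibc wrapper instead of the raw syscall used above:

#define _GNU_SOURCE // often predefined by g++
#include <fcntl.h>
#include <stddef.h>

// Returns 0 on success, -1 on failure (errno set). Punching the hole
// gives the pages back to the OS while the file size, and thus the
// heap layout, stays intact.
static int uncommit_range(int fd, size_t offset, size_t length) {
  return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                   (off_t)offset, (off_t)length);
}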
@@ -26,12 +26,14 @@

 #include "memory/allocation.hpp"

+class ZErrno;
+
 class ZBackingFile {
 private:
-  static bool _hugetlbfs_mmap_retry;
-
   int      _fd;
+  size_t   _size;
   uint64_t _filesystem;
+  size_t   _block_size;
   size_t   _available;
   bool     _initialized;

@@ -43,11 +45,17 @@ private:
   bool is_hugetlbfs() const;
   bool tmpfs_supports_transparent_huge_pages() const;

-  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
-  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
-  bool try_expand_tmpfs(size_t offset, size_t length) const;
-  bool try_expand_hugetlbfs(size_t offset, size_t length) const;
-  bool try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const;
+  ZErrno fallocate_compat_ftruncate(size_t size) const;
+  ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
+  ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole(size_t offset, size_t length);
+  ZErrno fallocate_punch_hole(size_t offset, size_t length);
+  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
+  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+
+  bool commit_inner(size_t offset, size_t length);

 public:
   ZBackingFile();
@@ -55,9 +63,11 @@ public:
   bool is_initialized() const;

   int fd() const;
+  size_t size() const;
   size_t available() const;

-  size_t try_expand(size_t offset, size_t length, size_t alignment) const;
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
 };

 #endif // OS_CPU_LINUX_X86_GC_Z_ZBACKINGFILE_LINUX_X86_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@ void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountp
   FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
   if (fd == NULL) {
     ZErrno err;
-    log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
     return;
   }

@@ -113,10 +113,10 @@ char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
   }

   // Preferred mount point not found
-  log_error(gc, init)("More than one %s filesystem found:", filesystem);
+  log_error(gc)("More than one %s filesystem found:", filesystem);
   ZArrayIterator<char*> iter2(mountpoints);
   for (char* mountpoint; iter2.next(&mountpoint);) {
-    log_error(gc, init)("  %s", mountpoint);
+    log_error(gc)("  %s", mountpoint);
   }

   return NULL;
@@ -130,7 +130,7 @@ char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferr

   if (mountpoints.size() == 0) {
     // No mount point found
-    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+    log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
   } else if (mountpoints.size() == 1) {
     // One mount point found
     path = strdup(mountpoints.at(0));
@@ -32,6 +32,7 @@
 #include "gc/z/zPhysicalMemory.inline.hpp"
 #include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
 #include "logging/log.hpp"
+#include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
@@ -40,7 +41,11 @@
 #include <sys/mman.h>
 #include <sys/types.h>

+//
 // Support for building on older Linux systems
+//
+
+// madvise(2) flags
 #ifndef MADV_HUGEPAGE
 #define MADV_HUGEPAGE 14
 #endif
@@ -48,22 +53,37 @@
 // Proc file entry for max map count
 #define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"

-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
-    _manager(),
-    _file() {
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}

-  if (!_file.is_initialized()) {
+void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
+  // Note that the available space on a tmpfs or a hugetlbfs filesystem
+  // will be zero if no size limit was specified when it was mounted.
+  const size_t available = _file.available();
+  if (available == 0) {
+    // No size limit set, skip check
+    log_info(gc, init)("Available space on backing filesystem: N/A");
     return;
   }

-  // Check and warn if max map count is too low
-  check_max_map_count(max_capacity);
+  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", available / M);

-  // Check and warn if available space on filesystem is too low
-  check_available_space_on_filesystem(max_capacity);
+  // Warn if the filesystem doesn't currently have enough space available to hold
+  // the max heap size. The max heap size will be capped if we later hit this limit
+  // when trying to expand the heap.
+  if (available < max) {
+    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+    log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
+    log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
+                    "(available", max / M);
+    log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
+                    "size could", available / M);
+    log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
+  }
 }

-void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
+void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
   const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
   FILE* const file = fopen(filename, "r");
   if (file == NULL) {
@@ -86,62 +106,101 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
   // However, ZGC tends to create the most mappings and dominate the total count.
   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
-  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
+  const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
   if (actual_max_map_count < required_max_map_count) {
-    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
-    log_warning(gc, init)("The system limit on number of memory mappings per process might be too low "
-                          "for the given");
-    log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
-                          max_capacity / M, filename);
-    log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing "
-                          "execution with the current", required_max_map_count, actual_max_map_count);
-    log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory.");
+    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+    log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given");
+    log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
+                    max / M, filename);
+    log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
+                    "with the current", required_max_map_count, actual_max_map_count);
+    log_warning(gc)("limit could lead to a fatal error, due to failure to map memory.");
   }
 }

-void ZPhysicalMemoryBacking::check_available_space_on_filesystem(size_t max_capacity) const {
-  // Note that the available space on a tmpfs or a hugetlbfs filesystem
-  // will be zero if no size limit was specified when it was mounted.
-  const size_t available = _file.available();
-  if (available == 0) {
-    // No size limit set, skip check
-    log_info(gc, init)("Available space on backing filesystem: N/A");
-    return;
-  }
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+  // Warn if available space is too low
+  warn_available_space(max);

-  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M",
-                     available / M);
-
-  // Warn if the filesystem doesn't currently have enough space available to hold
-  // the max heap size. The max heap size will be capped if we later hit this limit
-  // when trying to expand the heap.
-  if (available < max_capacity) {
-    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
-    log_warning(gc, init)("Not enough space available on the backing filesystem to hold the current "
-                          "max Java heap");
-    log_warning(gc, init)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem "
-                          "accordingly (available", max_capacity / M);
-    log_warning(gc, init)("space is currently " SIZE_FORMAT "M). Continuing execution with the current "
-                          "filesystem size could", available / M);
-    log_warning(gc, init)("lead to a premature OutOfMemoryError being thrown, due to failure to map "
-                          "memory.");
-  }
+  // Warn if max map count is too low
+  warn_max_map_count(max);
 }

-bool ZPhysicalMemoryBacking::is_initialized() const {
-  return _file.is_initialized();
+bool ZPhysicalMemoryBacking::supports_uncommit() {
+  assert(!is_init_completed(), "Invalid state");
+  assert(_file.size() >= ZGranuleSize, "Invalid size");
+
+  // Test if uncommit is supported by uncommitting and then re-committing a granule
+  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
 }

-size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
-  assert(old_capacity < new_capacity, "Invalid old/new capacity");
+size_t ZPhysicalMemoryBacking::commit(size_t size) {
+  size_t committed = 0;

-  const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, ZGranuleSize);
-  if (capacity > old_capacity) {
-    // Add expanded capacity to free list
-    _manager.free(old_capacity, capacity - old_capacity);
+  // Fill holes in the backing file
+  while (committed < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - committed;
+    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+    if (start == UINTPTR_MAX) {
+      // No holes to commit
+      break;
+    }
+
+    // Try commit hole
+    const size_t filled = _file.commit(start, allocated);
+    if (filled > 0) {
+      // Successful or partially successful
+      _committed.free(start, filled);
+      committed += filled;
+    }
+    if (filled < allocated) {
+      // Failed or partially failed
+      _uncommitted.free(start + filled, allocated - filled);
+      return committed;
+    }
   }

-  return capacity;
+  // Expand backing file
+  if (committed < size) {
+    const size_t remaining = size - committed;
+    const uintptr_t start = _file.size();
+    const size_t expanded = _file.commit(start, remaining);
+    if (expanded > 0) {
+      // Successful or partially successful
+      _committed.free(start, expanded);
+      committed += expanded;
+    }
+  }
+
+  return committed;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
+  size_t uncommitted = 0;
+
+  // Punch holes in backing file
+  while (uncommitted < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - uncommitted;
+    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+    // Try punch hole
+    const size_t punched = _file.uncommit(start, allocated);
+    if (punched > 0) {
+      // Successful or partially successful
+      _uncommitted.free(start, punched);
+      uncommitted += punched;
+    }
+    if (punched < allocated) {
+      // Failed or partially failed
+      _committed.free(start + punched, allocated - punched);
+      return uncommitted;
+    }
+  }
+
+  return uncommitted;
 }

 ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
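To make the mapping estimate above concrete: with the 2M granule, a 16G max heap yields (16G / 2M) * 3 * 1.2 = 8192 * 3.6, roughly 29491 required mappings, which is comfortably below the common vm.max_map_count default of 65530; the warning only fires for considerably larger heaps or lowered limits. Note also how supports_uncommit() probes the kernel empirically: it punches a hole in one granule and refills it, and only if both round-trip to the full granule size is uncommit considered supported.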
@@ -151,7 +210,7 @@ ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {

   // Allocate segments
   for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _manager.alloc_from_front(ZGranuleSize);
+    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
     assert(start != UINTPTR_MAX, "Allocation should never fail");
     pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
   }
@@ -159,13 +218,13 @@ ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
   return pmem;
 }

-void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
   const size_t nsegments = pmem.nsegments();

   // Free segments
   for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment segment = pmem.segment(i);
-    _manager.free(segment.start(), segment.size());
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _committed.free(segment.start(), segment.size());
   }
 }

@@ -178,10 +237,10 @@ void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
   }
 }

-void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
-  if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size, int advice) const {
+  if (madvise((void*)addr, size, advice) == -1) {
     ZErrno err;
-    log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+    log_error(gc)("Failed to advise on memory (advice %d, %s)", advice, err.to_string());
   }
 }
@@ -190,41 +249,42 @@ void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 }

-void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
   const size_t nsegments = pmem.nsegments();
+  size_t size = 0;

   // Map segments
   for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment segment = pmem.segment(i);
-    const size_t size = segment.size();
-    const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    const uintptr_t segment_addr = addr + size;
+    const void* const res = mmap((void*)segment_addr, segment.size(), PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
     if (res == MAP_FAILED) {
       ZErrno err;
       map_failed(err);
     }

-    // Advise on use of transparent huge pages before touching it
-    if (ZLargePages::is_transparent()) {
-      advise_view(addr, size);
-    }
+    size += segment.size();
+  }

-    // NUMA interleave memory before touching it
-    ZNUMA::memory_interleave(addr, size);
+  // Advise on use of transparent huge pages before touching it
+  if (ZLargePages::is_transparent()) {
+    advise_view(addr, size, MADV_HUGEPAGE);
+  }

-    if (pretouch) {
-      pretouch_view(addr, size);
-    }
+  // NUMA interleave memory before touching it
+  ZNUMA::memory_interleave(addr, size);

-    addr += size;
+  // Pre-touch memory
+  if (pretouch) {
+    pretouch_view(addr, size);
   }
 }

-void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
+void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
   // Note that we must keep the address space reservation intact and just detach
   // the backing memory. For this reason we map a new anonymous, non-accessible
   // and non-reserved page over the mapping instead of actually unmapping.
-  const size_t size = pmem.size();
-  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  const void* const res = mmap((void*)addr, pmem.size(), PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
   if (res == MAP_FAILED) {
     ZErrno err;
     map_failed(err);
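A stand-alone sketch of the detach trick described in unmap_view() above: overlay the file-backed mapping with a fresh anonymous PROT_NONE, MAP_NORESERVE mapping at the same address, so the virtual range stays reserved while the physical backing is released:

#include <sys/mman.h>
#include <cstddef>

static bool detach_backing(void* addr, size_t size) {
  // MAP_FIXED atomically replaces the old mapping at addr
  void* const res = mmap(addr, size, PROT_NONE,
                         MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
  return res != MAP_FAILED;
}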
@@ -232,11 +292,11 @@ void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) co
   }
 }

 uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
-  // From an NMT point of view we treat the first heap mapping (marked0) as committed
+  // From an NMT point of view we treat the first heap view (marked0) as committed
   return ZAddress::marked0(offset);
 }

-void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   if (ZVerifyViews) {
     // Map good view
     map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
@@ -248,7 +308,7 @@ void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
   }
 }

-void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   if (ZVerifyViews) {
     // Unmap good view
     unmap_view(pmem, ZAddress::good(offset));
@@ -260,13 +320,13 @@ void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const
   }
 }

-void ZPhysicalMemoryBacking::debug_map(ZPhysicalMemory pmem, uintptr_t offset) const {
+void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
   map_view(pmem, ZAddress::good(offset), false /* pretouch */);
 }

-void ZPhysicalMemoryBacking::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
   // Unmap good view
   assert(ZVerifyViews, "Should be enabled");
   unmap_view(pmem, ZAddress::good(offset));
@@ -32,35 +32,39 @@ class ZPhysicalMemory;

 class ZPhysicalMemoryBacking {
 private:
-  ZMemoryManager _manager;
   ZBackingFile   _file;
+  ZMemoryManager _committed;
+  ZMemoryManager _uncommitted;

+  void warn_available_space(size_t max) const;
+  void warn_max_map_count(size_t max) const;

-  void check_max_map_count(size_t max_capacity) const;
-  void check_available_space_on_filesystem(size_t max_capacity) const;
   void map_failed(ZErrno err) const;

-  void advise_view(uintptr_t addr, size_t size) const;
+  void advise_view(uintptr_t addr, size_t size, int advice) const;
   void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
-  void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;

 public:
-  ZPhysicalMemoryBacking(size_t max_capacity);
-
   bool is_initialized() const;

-  size_t try_expand(size_t old_capacity, size_t new_capacity);
+  void warn_commit_limits(size_t max) const;
+  bool supports_uncommit();
+
+  size_t commit(size_t size);
+  size_t uncommit(size_t size);

   ZPhysicalMemory alloc(size_t size);
-  void free(ZPhysicalMemory pmem);
+  void free(const ZPhysicalMemory& pmem);

   uintptr_t nmt_address(uintptr_t offset) const;

-  void map(ZPhysicalMemory pmem, uintptr_t offset) const;
-  void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;

-  void debug_map(ZPhysicalMemory pmem, uintptr_t offset) const;
-  void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
 };

 #endif // OS_CPU_LINUX_X86_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
@@ -30,7 +30,6 @@
 #include "gc/z/zGranuleMap.hpp"
 #include "gc/z/zHeap.hpp"
 #include "gc/z/zPageAllocator.hpp"
-#include "gc/z/zPhysicalMemory.hpp"
 #include "utilities/macros.hpp"

 // Expose some ZGC globals to the SA agent.
@@ -77,20 +76,18 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
   nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
   volatile_nonstatic_field(ZPage, _top, uintptr_t) \
   \
-  nonstatic_field(ZPageAllocator, _physical, ZPhysicalMemoryManager) \
+  nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
+  nonstatic_field(ZPageAllocator, _capacity, size_t) \
   nonstatic_field(ZPageAllocator, _used, size_t) \
   \
   nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
   \
   nonstatic_field(ZGranuleMapForPageTable, _map, ZPage** const) \
   \
-  nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
-  nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
+  nonstatic_field(ZVirtualMemory, _start, const uintptr_t) \
+  nonstatic_field(ZVirtualMemory, _end, const uintptr_t) \
   \
-  nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding) \
-  \
-  nonstatic_field(ZPhysicalMemoryManager, _max_capacity, const size_t) \
-  nonstatic_field(ZPhysicalMemoryManager, _capacity, size_t)
+  nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding)

 #define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \
   declare_constant(ZPhaseRelocate) \
@@ -48,6 +48,7 @@ ZCollectedHeap::ZCollectedHeap() :
     _heap(),
     _director(new ZDirector()),
     _driver(new ZDriver()),
+    _uncommitter(new ZUncommitter()),
     _stat(new ZStat()),
     _runtime_workers() {}

@@ -77,6 +78,7 @@ void ZCollectedHeap::initialize_serviceability() {
 void ZCollectedHeap::stop() {
   _director->stop();
   _driver->stop();
+  _uncommitter->stop();
   _stat->stop();
 }

@@ -272,6 +274,7 @@ jlong ZCollectedHeap::millis_since_last_gc() {
 void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   tc->do_thread(_director);
   tc->do_thread(_driver);
+  tc->do_thread(_uncommitter);
   tc->do_thread(_stat);
   _heap.worker_threads_do(tc);
   _runtime_workers.threads_do(tc);
@@ -331,6 +334,8 @@ void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
   st->cr();
   _driver->print_on(st);
   st->cr();
+  _uncommitter->print_on(st);
+  st->cr();
   _stat->print_on(st);
   st->cr();
   _heap.print_worker_threads_on(st);
@@ -29,10 +29,11 @@
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zDirector.hpp"
 #include "gc/z/zDriver.hpp"
-#include "gc/z/zInitialize.hpp"
 #include "gc/z/zHeap.hpp"
+#include "gc/z/zInitialize.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
 #include "gc/z/zStat.hpp"
+#include "gc/z/zUncommitter.hpp"

 class ZCollectedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -44,6 +45,7 @@ private:
   ZHeap _heap;
   ZDirector* _director;
   ZDriver* _driver;
+  ZUncommitter* _uncommitter;
   ZStat* _stat;
   ZRuntimeWorkers _runtime_workers;
@@ -22,6 +22,7 @@
  */

 #include "precompiled.hpp"
+#include "gc/shared/gcArguments.hpp"
 #include "gc/shared/oopStorage.hpp"
 #include "gc/z/zAddress.hpp"
 #include "gc/z/zGlobals.hpp"
@@ -45,6 +46,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/align.hpp"
@@ -62,7 +64,7 @@ ZHeap* ZHeap::_heap = NULL;
 ZHeap::ZHeap() :
     _workers(),
     _object_allocator(_workers.nworkers()),
-    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
+    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
     _page_table(),
     _forwarding_table(),
     _mark(&_workers, &_page_table),
@@ -81,13 +83,15 @@ ZHeap::ZHeap() :
 }

 size_t ZHeap::heap_min_size() const {
-  const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
-  return MIN2(aligned_min_size, heap_max_size());
+  return MinHeapSize;
+}
+
+size_t ZHeap::heap_initial_size() const {
+  return InitialHeapSize;
 }

 size_t ZHeap::heap_max_size() const {
-  const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
-  return MIN2(aligned_max_size, ZAddressOffsetMax);
+  return MaxHeapSize;
 }

 size_t ZHeap::heap_max_reserve_size() const {
@@ -102,7 +106,7 @@ bool ZHeap::is_initialized() const {
 }

 size_t ZHeap::min_capacity() const {
-  return heap_min_size();
+  return _page_allocator.min_capacity();
 }

 size_t ZHeap::max_capacity() const {
@@ -250,10 +254,14 @@ void ZHeap::free_page(ZPage* page, bool reclaimed) {
   _page_allocator.free_page(page, reclaimed);
 }

+uint64_t ZHeap::uncommit(uint64_t delay) {
+  return _page_allocator.uncommit(delay);
+}
+
 void ZHeap::before_flip() {
   if (ZVerifyViews) {
     // Unmap all pages
-    _page_allocator.unmap_all_pages();
+    _page_allocator.debug_unmap_all_pages();
   }
 }

@@ -262,8 +270,9 @@ void ZHeap::after_flip() {
     // Map all pages
     ZPageTableIterator iter(&_page_table);
     for (ZPage* page; iter.next(&page);) {
-      _page_allocator.map_page(page);
+      _page_allocator.debug_map_page(page);
     }
+    _page_allocator.debug_map_cached_pages();
   }
 }
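ZHeap::uncommit(delay) above is the entry point the new ZUncommitter thread calls periodically. A hedged, pseudocode-style sketch of such a driver loop; idle() is a hypothetical wait helper and the real ZUncommitter also coordinates with stop():

// Not the actual ZUncommitter implementation, just the shape of the loop:
// uncommit() returns the number of seconds until the next page expires
// from the cache, which becomes the next sleep timeout.
void uncommitter_run() {
  uint64_t timeout = 0;
  while (idle(timeout)) {
    timeout = ZHeap::heap()->uncommit(ZUncommitDelay);
  }
}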
@@ -66,6 +66,7 @@ private:
   ZServiceability _serviceability;

   size_t heap_min_size() const;
+  size_t heap_initial_size() const;
   size_t heap_max_size() const;
   size_t heap_max_reserve_size() const;

@@ -129,6 +130,9 @@ public:
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);

+  // Uncommit memory
+  uint64_t uncommit(uint64_t delay);
+
   // Object allocation
   uintptr_t alloc_tlab(size_t size);
   uintptr_t alloc_object(size_t size);
@@ -210,11 +210,11 @@ public:
 template <typename T, bool forward>
 class ZListIteratorImpl : public StackObj {
 private:
-  ZList<T>* const _list;
-  T*              _next;
+  const ZList<T>* const _list;
+  T*                    _next;

 public:
-  ZListIteratorImpl(ZList<T>* list);
+  ZListIteratorImpl(const ZList<T>* list);

   bool next(T** elem);
 };
@@ -226,14 +226,14 @@ public:
 template <typename T>
 class ZListIterator : public ZListIteratorImpl<T, ZLIST_FORWARD> {
 public:
-  ZListIterator(ZList<T>* list) :
+  ZListIterator(const ZList<T>* list) :
       ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
 };

 template <typename T>
 class ZListReverseIterator : public ZListIteratorImpl<T, ZLIST_REVERSE> {
 public:
-  ZListReverseIterator(ZList<T>* list) :
+  ZListReverseIterator(const ZList<T>* list) :
       ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
 };
@@ -27,7 +27,7 @@
 #include "gc/z/zList.hpp"

 template <typename T, bool forward>
-ZListIteratorImpl<T, forward>::ZListIteratorImpl(ZList<T>* list) :
+ZListIteratorImpl<T, forward>::ZListIteratorImpl(const ZList<T>* list) :
     _list(list),
     _next(forward ? list->first() : list->last()) {}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -34,15 +34,19 @@
static const ZStatCounter ZCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", ZStatUnitOpsPerSecond);

static size_t bitmap_size(uint32_t size, size_t nsegments) {
  // We need at least one bit per segment
  return MAX2<size_t>(size, nsegments) * 2;
}

ZLiveMap::ZLiveMap(uint32_t size) :
    _seqnum(0),
    _live_objects(0),
    _live_bytes(0),
    _segment_live_bits(0),
    _segment_claim_bits(0),
    // We need at least one bit per segment.
    _bitmap(MAX2<size_t>(size, nsegments) * 2),
    _shift(exact_log2(segment_size())) {}
    _bitmap(bitmap_size(size, nsegments)),
    _segment_shift(exact_log2(segment_size())) {}

void ZLiveMap::reset(size_t index) {
  const uint32_t seqnum_initializing = (uint32_t)-1;

@ -121,3 +125,11 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
  const bool success = set_segment_live_atomic(segment);
  assert(success, "Should never fail");
}

void ZLiveMap::resize(uint32_t size) {
  const size_t new_bitmap_size = bitmap_size(size, nsegments);
  if (_bitmap.size() != new_bitmap_size) {
    _bitmap.reinitialize(new_bitmap_size, false /* clear */);
    _segment_shift = exact_log2(segment_size());
  }
}
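
The new resize() exists because pages can now be retyped and split (see zPage.cpp further down), which changes the maximum object count a live map must cover. For context, a standalone sketch of the same sizing rule follows; this is not HotSpot code, just an illustration of why the helper doubles the object count (ZGC keeps two mark bits per object, live and strongly live) and pads so each of the 64 segments covers at least one bit:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative only: mirror of bitmap_size(), using std::max for MAX2.
static const size_t nsegments = 64;

static size_t bitmap_size(uint32_t size, size_t nseg) {
  // Two bits per object, and at least one bit per segment
  return std::max<size_t>(size, nseg) * 2;
}

int main() {
  // A page holding fewer objects than there are segments is sized by
  // the segment count instead of the object count.
  std::printf("%zu\n", bitmap_size(32, nsegments));   // prints 128
  std::printf("%zu\n", bitmap_size(2048, nsegments)); // prints 4096
  return 0;
}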

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -35,13 +35,13 @@ class ZLiveMap {
private:
  static const size_t nsegments = 64;

  volatile uint32_t _seqnum;             // Mark sequence number
  volatile uint32_t _live_objects;       // Number of live objects
  volatile size_t _live_bytes;           // Number of live bytes
  BitMap::bm_word_t _segment_live_bits;  // Segment live bits
  BitMap::bm_word_t _segment_claim_bits; // Segment claim bits
  ZBitMap _bitmap;                       // Mark bitmap
  const size_t _shift;                   // Segment shift
  volatile uint32_t _seqnum;
  volatile uint32_t _live_objects;
  volatile size_t _live_bytes;
  BitMap::bm_word_t _segment_live_bits;
  BitMap::bm_word_t _segment_claim_bits;
  ZBitMap _bitmap;
  size_t _segment_shift;

  const BitMapView segment_live_bits() const;
  const BitMapView segment_claim_bits() const;

@ -72,6 +72,7 @@ public:
  ZLiveMap(uint32_t size);

  void reset();
  void resize(uint32_t size);

  bool is_marked() const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -92,7 +92,7 @@ inline BitMap::idx_t ZLiveMap::segment_size() const {
}

inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const {
  return index >> _shift;
  return index >> _segment_shift;
}

inline bool ZLiveMap::get(size_t index) const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -49,6 +49,30 @@ uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
  return UINTPTR_MAX;
}

uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
  ZMemory* area = _freelist.first();
  if (area != NULL) {
    if (area->size() <= size) {
      // Smaller than or equal to requested, remove area
      const uintptr_t start = area->start();
      *allocated = area->size();
      _freelist.remove(area);
      delete area;
      return start;
    } else {
      // Larger than requested, shrink area
      const uintptr_t start = area->start();
      area->shrink_from_front(size);
      *allocated = size;
      return start;
    }
  }

  // Out of memory
  *allocated = 0;
  return UINTPTR_MAX;
}

uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
  ZListReverseIterator<ZMemory> iter(&_freelist);
  for (ZMemory* area; iter.next(&area);) {

@ -71,6 +95,29 @@ uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
  return UINTPTR_MAX;
}

uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
  ZMemory* area = _freelist.last();
  if (area != NULL) {
    if (area->size() <= size) {
      // Smaller than or equal to requested, remove area
      const uintptr_t start = area->start();
      *allocated = area->size();
      _freelist.remove(area);
      delete area;
      return start;
    } else {
      // Larger than requested, shrink area
      area->shrink_from_back(size);
      *allocated = size;
      return area->end();
    }
  }

  // Out of memory
  *allocated = 0;
  return UINTPTR_MAX;
}

void ZMemoryManager::free(uintptr_t start, size_t size) {
  assert(start != UINTPTR_MAX, "Invalid address");
  const uintptr_t end = start + size;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -54,7 +54,9 @@ private:

public:
  uintptr_t alloc_from_front(size_t size);
  uintptr_t alloc_from_front_at_most(size_t size, size_t* allocated);
  uintptr_t alloc_from_back(size_t size);
  uintptr_t alloc_from_back_at_most(size_t size, size_t* allocated);
  void free(uintptr_t start, size_t size);
};
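
The new *_at_most variants change the failure mode: instead of returning nothing when no free area is large enough, they hand back whatever the first (or last) area can provide and report the actual amount through 'allocated'. A minimal standalone sketch of the front variant, using a std::list of (start, size) areas in place of HotSpot's ZList (all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <list>

struct Area {
  uintptr_t start;
  size_t size;
};

// Illustrative only: take up to 'size' bytes from the first free area,
// reporting how much was actually taken via 'allocated'.
uintptr_t alloc_from_front_at_most(std::list<Area>& freelist, size_t size, size_t* allocated) {
  if (!freelist.empty()) {
    Area& area = freelist.front();
    const uintptr_t start = area.start;
    if (area.size <= size) {
      // Area is smaller than or equal to the request: take all of it
      *allocated = area.size;
      freelist.pop_front();
    } else {
      // Area is larger: shrink it from the front
      *allocated = size;
      area.start += size;
      area.size -= size;
    }
    return start;
  }

  // Out of memory
  *allocated = 0;
  return UINTPTR_MAX;
}

int main() {
  std::list<Area> freelist = {{0x1000, 0x3000}};
  size_t got = 0;
  const uintptr_t addr = alloc_from_front_at_most(freelist, 0x1000, &got);
  std::printf("addr=%#lx got=%#zx\n", (unsigned long)addr, got);
  return 0;
}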

@ -28,30 +28,72 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

ZPage::ZPage(uint8_t type, ZVirtualMemory vmem, ZPhysicalMemory pmem) :
ZPage::ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) :
    _type(type_from_size(vmem.size())),
    _numa_id((uint8_t)-1),
    _seqnum(0),
    _virtual(vmem),
    _top(start()),
    _livemap(object_max_count()),
    _last_used(0),
    _physical(pmem) {
  assert_initialized();
}

ZPage::ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) :
    _type(type),
    _numa_id((uint8_t)-1),
    _seqnum(0),
    _virtual(vmem),
    _top(start()),
    _livemap(object_max_count()),
    _last_used(0),
    _physical(pmem) {
  assert(!_physical.is_null(), "Should not be null");
  assert(!_virtual.is_null(), "Should not be null");
  assert((type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
         (type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
         (type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
         "Page type/size mismatch");
  assert_initialized();
}

ZPage::~ZPage() {
  assert(_physical.is_null(), "Should be null");
void ZPage::assert_initialized() const {
  assert(!_virtual.is_null(), "Should not be null");
  assert(!_physical.is_null(), "Should not be null");
  assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
         (_type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
         (_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
         "Page type/size mismatch");
}

void ZPage::reset() {
  _seqnum = ZGlobalSeqNum;
  _top = start();
  _livemap.reset();
  _last_used = 0;
}

ZPage* ZPage::retype(uint8_t type) {
  assert(_type != type, "Invalid retype");
  _type = type;
  _livemap.resize(object_max_count());
  return this;
}

ZPage* ZPage::split(size_t size) {
  return split(type_from_size(size), size);
}

ZPage* ZPage::split(uint8_t type, size_t size) {
  assert(_virtual.size() > size, "Invalid split");

  // Resize this page, keep _numa_id, _seqnum, and _last_used
  const ZVirtualMemory vmem = _virtual.split(size);
  const ZPhysicalMemory pmem = _physical.split(size);
  _type = type_from_size(_virtual.size());
  _top = start();
  _livemap.resize(object_max_count());

  // Create new page, inherit _seqnum and _last_used
  ZPage* const page = new ZPage(type, vmem, pmem);
  page->_seqnum = _seqnum;
  page->_last_used = _last_used;
  return page;
}
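
split() carves the first 'size' bytes of both the virtual and physical ranges off into a new page, while this page keeps the tail. A standalone sketch of the range arithmetic (not the HotSpot types, just an illustration of how ZVirtualMemory::split() behaves here):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative only: a contiguous range that can give away its first
// 'size' bytes, mirroring how a page split carves a new page off the
// front while the original page keeps the remainder.
struct Range {
  uintptr_t start;
  uintptr_t end;

  size_t size() const { return end - start; }

  Range split(size_t size) {
    assert(size <= this->size());
    const Range front = {start, start + size};
    start += size; // This range keeps the tail
    return front;
  }
};

int main() {
  Range page = {0x100000, 0x500000}; // a 4M range
  const Range carved = page.split(0x200000);
  std::printf("carved=[%#lx, %#lx) rest=[%#lx, %#lx)\n",
              (unsigned long)carved.start, (unsigned long)carved.end,
              (unsigned long)page.start, (unsigned long)page.end);
  return 0;
}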

void ZPage::print_on(outputStream* out) const {

@ -35,26 +35,27 @@ class ZPage : public CHeapObj<mtGC> {
  friend class ZList<ZPage>;

private:
  // Always hot
  const uint8_t _type;           // Page type
  uint8_t _numa_id;              // NUMA node affinity
  uint32_t _seqnum;              // Allocation sequence number
  const ZVirtualMemory _virtual; // Virtual start/end address
  volatile uintptr_t _top;       // Virtual top address
  ZLiveMap _livemap;             // Live map
  uint8_t _type;
  uint8_t _numa_id;
  uint32_t _seqnum;
  ZVirtualMemory _virtual;
  volatile uintptr_t _top;
  ZLiveMap _livemap;
  uint64_t _last_used;
  ZPhysicalMemory _physical;
  ZListNode<ZPage> _node;

  // Hot when relocated and cached
  ZPhysicalMemory _physical; // Physical memory for page
  ZListNode<ZPage> _node;    // Page list node
  void assert_initialized() const;

  uint8_t type_from_size(size_t size) const;
  const char* type_to_string() const;

  bool is_object_marked(uintptr_t addr) const;
  bool is_object_strongly_marked(uintptr_t addr) const;

public:
  ZPage(uint8_t type, ZVirtualMemory vmem, ZPhysicalMemory pmem);
  ~ZPage();
  ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem);
  ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem);

  uint32_t object_max_count() const;
  size_t object_alignment_shift() const;

@ -67,17 +68,10 @@ public:
  uintptr_t top() const;
  size_t remaining() const;

  uint8_t numa_id();

  ZPhysicalMemory& physical_memory();
  const ZPhysicalMemory& physical_memory() const;
  const ZVirtualMemory& virtual_memory() const;

  void reset();

  bool is_in(uintptr_t addr) const;

  uintptr_t block_start(uintptr_t addr) const;
  bool block_is_obj(uintptr_t addr) const;
  uint8_t numa_id();

  bool is_allocating() const;
  bool is_relocatable() const;

@ -85,6 +79,20 @@ public:
  bool is_mapped() const;
  void set_pre_mapped();

  uint64_t last_used() const;
  void set_last_used();

  void reset();

  ZPage* retype(uint8_t type);
  ZPage* split(size_t size);
  ZPage* split(uint8_t type, size_t size);

  bool is_in(uintptr_t addr) const;

  uintptr_t block_start(uintptr_t addr) const;
  bool block_is_obj(uintptr_t addr) const;

  bool is_marked() const;
  bool is_object_live(uintptr_t addr) const;
  bool is_object_strongly_live(uintptr_t addr) const;

@ -34,10 +34,23 @@
#include "gc/z/zVirtualMemory.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

inline uint8_t ZPage::type_from_size(size_t size) const {
  switch (size) {
  case ZPageSizeSmall:
    return ZPageTypeSmall;

  case ZPageSizeMedium:
    return ZPageTypeMedium;

  default:
    return ZPageTypeLarge;
  }
}

inline const char* ZPage::type_to_string() const {
  switch (type()) {
  case ZPageTypeSmall:

@ -116,7 +129,7 @@ inline size_t ZPage::remaining() const {
  return end() - top();
}

inline ZPhysicalMemory& ZPage::physical_memory() {
inline const ZPhysicalMemory& ZPage::physical_memory() const {
  return _physical;
}

@ -132,23 +145,6 @@ inline uint8_t ZPage::numa_id() {
  return _numa_id;
}

inline bool ZPage::is_in(uintptr_t addr) const {
  const uintptr_t offset = ZAddress::offset(addr);
  return offset >= start() && offset < top();
}

inline uintptr_t ZPage::block_start(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return addr;
  } else {
    return ZAddress::good(top());
  }
}

inline bool ZPage::block_is_obj(uintptr_t addr) const {
  return ZAddress::offset(addr) < top();
}

inline bool ZPage::is_allocating() const {
  return _seqnum == ZGlobalSeqNum;
}

@ -168,6 +164,31 @@ inline void ZPage::set_pre_mapped() {
  _seqnum = 1;
}

inline uint64_t ZPage::last_used() const {
  return _last_used;
}

inline void ZPage::set_last_used() {
  _last_used = os::elapsedTime();
}

inline bool ZPage::is_in(uintptr_t addr) const {
  const uintptr_t offset = ZAddress::offset(addr);
  return offset >= start() && offset < top();
}

inline uintptr_t ZPage::block_start(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return addr;
  } else {
    return ZAddress::good(top());
  }
}

inline bool ZPage::block_is_obj(uintptr_t addr) const {
  return ZAddress::offset(addr) < top();
}

inline bool ZPage::is_marked() const {
  assert(is_relocatable(), "Invalid page state");
  return _livemap.is_marked();

@ -22,6 +22,7 @@
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"

@ -30,14 +31,16 @@
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zPreMappedMemory.inline.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter ZCounterPageCacheEvict("Memory", "Page Cache Evict", ZStatUnitBytesPerSecond);
static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

class ZPageAllocRequest : public StackObj {

@ -85,37 +88,105 @@ public:

ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
ZPageAllocator::ZPageAllocator(size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(max_capacity),
    _physical(),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _safe_delete() {}
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(initial_capacity);

  // Successfully initialized
  _initialized = true;
}

void ZPageAllocator::prime_cache(size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}
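
Together with the ZUncommit and ZUncommitDelay flags added in z_globals.hpp, this constructor is what lets a heap started below its maximum both grow and shrink at runtime. As a usage sketch (a JDK 13-era command line, where ZGC still sits behind UnlockExperimentalVMOptions; the flag values are illustrative):

java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -Xms512m -Xmx4g -XX:ZUncommitDelay=300 -Xlog:gc+init,gc+heap MyApp

Note that with -Xms equal to -Xmx, min capacity equals max capacity and uncommit is disabled, matching the max_capacity > min_capacity test above.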

bool ZPageAllocator::is_initialized() const {
  return _physical.is_initialized() &&
         _virtual.is_initialized() &&
         _pre_mapped.is_initialized();
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _physical.max_capacity();
  return _max_capacity;
}

size_t ZPageAllocator::current_max_capacity() const {
  return _physical.current_max_capacity();
  return _current_max_capacity;
}

size_t ZPageAllocator::capacity() const {
  return _physical.capacity();
  return _capacity;
}

size_t ZPageAllocator::max_reserve() const {

@ -135,7 +206,7 @@ size_t ZPageAllocator::used() const {
}

size_t ZPageAllocator::unused() const {
  const ssize_t unused = (ssize_t)_physical.capacity() - (ssize_t)_used - (ssize_t)_max_reserve;
  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

@ -181,83 +252,40 @@ void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  }
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = current_max_capacity() - used();

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, max_reserve());
  }

  return available;
}

size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) {
  // Ensure that we always have space available for the reserve. This
  // is needed to avoid losing the reserve because of failure to map
  // more memory before reaching max capacity.
  _physical.try_ensure_unused_capacity(size + max_reserve());

  size_t unused = _physical.unused_capacity();

  if (no_reserve) {
    // The reserve should not be considered unused
    unused -= MIN2(unused, max_reserve());
  }

  return MIN2(size, unused);
}

size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) {
  // This function is called during construction, where the
  // physical memory manager might have failed to initialize.
  if (!_physical.is_initialized()) {
    return 0;
  }

  return try_ensure_unused(size, true /* no_reserve */);
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  if (pmem.is_null()) {
    // Out of memory
    return NULL;
  }

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    _physical.free(pmem);
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::flush_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    return;
  }

  // Detach the memory mapping.
  detach_memory(_pre_mapped.virtual_memory(), _pre_mapped.physical_memory());

  _pre_mapped.clear();
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Detach virtual and physical memory
  detach_memory(page->virtual_memory(), page->physical_memory());
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}

void ZPageAllocator::map_page(ZPage* page) {
void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  if (!page->is_mapped()) {
    _physical.map(page->physical_memory(), page->start());

@ -266,57 +294,92 @@ void ZPageAllocator::map_page(ZPage* page) {
  }
}

void ZPageAllocator::unmap_all_pages() {
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax));
  _physical.debug_unmap(pmem, 0 /* offset */);
  pmem.clear();
size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve because of failure to increase capacity before
  // reaching max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0lf%%) to " SIZE_FORMAT "M(%.0lf%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}
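
A worked example of the arithmetic in ensure_available(), with made-up numbers, can make the reserve handling easier to follow:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative numbers only: a 16M request with a 32M reserve on a heap
// currently at 512M committed, 480M used, and a 1024M current max.
int main() {
  size_t capacity = 512, used = 480, current_max = 1024, reserve = 32;
  size_t size = 16 + reserve;         // request plus reserve = 48M
  size_t available = capacity - used; // 32M already unused
  if (available < size && capacity < current_max) {
    // Commit only the shortfall, capped by the remaining headroom: 16M
    size_t commit = std::min(size - available, current_max - capacity);
    capacity += commit;               // assume the commit succeeds
  }
  std::printf("capacity=%zuM available=%zuM\n", capacity, capacity - used);
  return 0;
}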

void ZPageAllocator::ensure_uncached_available(size_t size) {
  assert(_capacity - _used >= size, "Invalid size");
  const size_t uncached_available = _capacity - _used - _cache.available();
  if (size > uncached_available) {
    flush_cache_for_allocation(size - uncached_available);
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
  const size_t max = max_available(flags.no_reserve());
  if (max < size) {
ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try allocating from the page cache
  ZPage* const cached_page = _cache.alloc_page(type, size);
  if (cached_page != NULL) {
    return cached_page;
  // Try allocate page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try allocate from the pre-mapped memory
  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
  if (pre_mapped_page != NULL) {
    return pre_mapped_page;
  }
  // Try flush pages from the cache
  ensure_uncached_available(size);

  // Flush any remaining pre-mapped memory so that
  // subsequent allocations can use the physical memory.
  flush_pre_mapped();

  // Try ensure that physical memory is available
  const size_t unused = try_ensure_unused(size, flags.no_reserve());
  if (unused < size) {
    // Try evict pages from the cache
    const size_t needed = size - unused;
    if (_cache.available() >= needed) {
      evict_cache(needed);
    }
  }

  // Create new page and allocate physical memory
  // Create new page
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags);
  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;

@ -326,11 +389,17 @@ ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationF
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);
  ZTracer::tracer()->report_page_alloc(size, _used, max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

@ -433,28 +502,15 @@ void ZPageAllocator::satisfy_alloc_queue() {
  }
}

void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem) {
  const uintptr_t addr = vmem.start();

  // Free virtual memory
  _virtual.free(vmem);

  // Unmap physical memory
  _physical.unmap(pmem, addr);

  // Free physical memory
  _physical.free(pmem);

  // Clear physical mapping
  pmem.clear();
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

@ -462,59 +518,157 @@ void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  satisfy_alloc_queue();
}

void ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl) {
size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl) {
  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  return flushed;
}
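
The overflush fix-up is the subtle part of flush_cache(): the last page taken out of the cache may be larger than needed, so the surplus is split off and returned to the cache before the remainder is destroyed. A standalone sketch of the bookkeeping, with plain page sizes standing in for ZPage objects (illustrative only):

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative only: flush pages until 'requested' bytes are covered,
// then split the surplus off the last page taken and keep it cached.
int main() {
  std::vector<size_t> cache = {2, 2, 4, 8}; // cached page sizes, in MB
  const size_t requested = 7;
  size_t flushed = 0;
  std::vector<size_t> taken;
  while (flushed < requested && !cache.empty()) {
    taken.push_back(cache.back());
    flushed += cache.back();
    cache.pop_back();
  }
  const size_t overflushed = flushed > requested ? flushed - requested : 0;
  if (overflushed > 0) {
    taken.back() -= overflushed;  // split the surplus off the last page
    cache.push_back(overflushed); // and return it to the cache
    flushed -= overflushed;
  }
  std::printf("flushed=%zuM overflushed=%zuM\n", flushed, overflushed);
  return 0;
}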

class ZPageCacheEvictClosure : public ZPageCacheFlushClosure {
private:
  const size_t _requested;
  size_t _evicted;

class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheEvictClosure(size_t requested) :
      _requested(requested),
      _evicted(0) {}
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_evicted < _requested) {
      // Evict page
      _evicted += page->size();
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't evict page
    // Don't flush page
    return false;
  }

  size_t evicted() const {
    return _evicted;
  }
};

void ZPageAllocator::evict_cache(size_t requested) {
  // Evict pages
  ZPageCacheEvictClosure cl(requested);
  flush_cache(&cl);
void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl);

  assert(requested == flushed, "Failed to flush");

  const size_t evicted = cl.evicted();
  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + evicted;
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                     "Evicted: " SIZE_FORMAT "M, Requested: " SIZE_FORMAT "M",
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     evicted / M, requested / M);
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheEvict, evicted);
  ZStatInc(ZCounterPageCacheFlush, flushed);
}

class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;
  const uint64_t _delay;
  uint64_t _timeout;

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + _delay;
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  uint64_t timeout() const {
    return _timeout;
  }
};

uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // Set the default timeout, when no pages are found in the
  // cache or when uncommit is disabled, equal to the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
    const size_t flush = uncommittable - uncommit;

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}
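
The timeout returned here drives the ZUncommitter thread's sleep: a page is only flushable once 'delay' seconds have passed since it was last used, and if some pages have not yet expired, the caller sleeps for the shortest remaining time before trying again. A standalone sketch of that per-page expiry math (illustrative values, std::min for MIN2):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t now = 1000, delay = 300;
  const uint64_t last_used[] = {600, 800, 950}; // seconds of VM uptime
  uint64_t timeout = delay;
  for (uint64_t lu : last_used) {
    const uint64_t expires = lu + delay;
    const uint64_t t = expires - std::min(expires, now); // 0 once expired
    if (t == 0) {
      std::printf("page last used at %lus: expired, flush\n", (unsigned long)lu);
    } else {
      timeout = std::min(timeout, t); // shortest non-expired timeout
    }
  }
  std::printf("next uncommit attempt in %lus\n", (unsigned long)timeout);
  return 0;
}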

void ZPageAllocator::enable_deferred_delete() const {

@ -525,6 +679,35 @@ void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

class ZPageCacheDebugMapClosure : public StackObj {
private:
  const ZPageAllocator* const _allocator;

public:
  ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
      _allocator(allocator) {}

  virtual void do_page(const ZPage* page) {
    _allocator->debug_map_page(page);
  }
};

void ZPageAllocator::debug_map_cached_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  ZPageCacheDebugMapClosure cl(this);
  _cache.pages_do(&cl);
}

void ZPageAllocator::debug_unmap_all_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();

@ -29,7 +29,6 @@
#include "gc/z/zLock.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zPhysicalMemory.hpp"
#include "gc/z/zPreMappedMemory.hpp"
#include "gc/z/zSafeDelete.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "memory/allocation.hpp"

@ -44,8 +43,11 @@ private:
  ZVirtualMemoryManager _virtual;
  ZPhysicalMemoryManager _physical;
  ZPageCache _cache;
  const size_t _min_capacity;
  const size_t _max_capacity;
  const size_t _max_reserve;
  ZPreMappedMemory _pre_mapped;
  size_t _current_max_capacity;
  size_t _capacity;
  size_t _used_high;
  size_t _used_low;
  size_t _used;

@ -53,39 +55,44 @@ private:
  ssize_t _reclaimed;
  ZList<ZPageAllocRequest> _queue;
  mutable ZSafeDelete<ZPage> _safe_delete;
  bool _uncommit;
  bool _initialized;

  static ZPage* const gc_marker;

  void prime_cache(size_t size);

  void increase_used(size_t size, bool relocation);
  void decrease_used(size_t size, bool reclaimed);

  size_t max_available(bool no_reserve) const;
  size_t try_ensure_unused(size_t size, bool no_reserve);
  size_t try_ensure_unused_for_pre_mapped(size_t size);

  ZPage* create_page(uint8_t type, size_t size);
  void destroy_page(ZPage* page);

  void flush_pre_mapped();
  void flush_cache(ZPageCacheFlushClosure* cl);
  void evict_cache(size_t requested);
  size_t max_available(bool no_reserve) const;
  bool ensure_available(size_t size, bool no_reserve);
  void ensure_uncached_available(size_t size);

  void check_out_of_memory_during_initialization();

  ZPage* alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags);
  ZPage* alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve);
  ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags);
  ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags);
  ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags);

  size_t flush_cache(ZPageCacheFlushClosure* cl);
  void flush_cache_for_allocation(size_t requested);

  void satisfy_alloc_queue();

  void detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem);

public:
  ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve);
  ZPageAllocator(size_t min_capacity,
                 size_t initial_capacity,
                 size_t max_capacity,
                 size_t max_reserve);

  bool is_initialized() const;

  size_t min_capacity() const;
  size_t max_capacity() const;
  size_t current_max_capacity() const;
  size_t capacity() const;

@ -102,11 +109,16 @@ public:
  ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
  void free_page(ZPage* page, bool reclaimed);

  uint64_t uncommit(uint64_t delay);

  void enable_deferred_delete() const;
  void disable_deferred_delete() const;

  void map_page(ZPage* page);
  void unmap_all_pages();
  void map_page(const ZPage* page) const;

  void debug_map_page(const ZPage* page) const;
  void debug_map_cached_pages() const;
  void debug_unmap_all_pages() const;

  bool is_alloc_stalled() const;
  void check_out_of_memory();

@ -31,8 +31,17 @@

static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);

ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
    _requested(requested),
    _flushed(0) {}

size_t ZPageCacheFlushClosure::overflushed() const {
  return _flushed > _requested ? _flushed - _requested : 0;
}

ZPageCache::ZPageCache() :
    _available(0),
    _small(),

@ -67,40 +76,73 @@ ZPage* ZPageCache::alloc_small_page() {
    remote_numa_id++;
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_medium_page() {
  ZPage* const l1_page = _medium.remove_first();
  if (l1_page != NULL) {
  ZPage* const page = _medium.remove_first();
  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
    return page;
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_large_page(size_t size) {
  // Find a page with the right size
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* l1_page; iter.next(&l1_page);) {
    if (l1_page->size() == size) {
  for (ZPage* page; iter.next(&page);) {
    if (size == page->size()) {
      // Page found
      _large.remove(l1_page);
      _large.remove(page);
      ZStatInc(ZCounterPageCacheHitL1);
      return l1_page;
      return page;
    }
  }

  ZStatInc(ZCounterPageCacheMiss);
  return NULL;
}

ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) {
  if (size <= ZPageSizeMedium) {
    return _medium.remove_first();
  }

  return NULL;
}

ZPage* ZPageCache::alloc_oversized_large_page(size_t size) {
  // Find a page that is large enough
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size <= page->size()) {
      // Page found
      _large.remove(page);
      return page;
    }
  }

  return NULL;
}

ZPage* ZPageCache::alloc_oversized_page(size_t size) {
  ZPage* page = alloc_oversized_large_page(size);
  if (page == NULL) {
    page = alloc_oversized_medium_page(size);
  }

  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL3);
  }

  return page;
}

ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
  ZPage* page;

  // Try allocate exact page
  if (type == ZPageTypeSmall) {
    page = alloc_small_page();
  } else if (type == ZPageTypeMedium) {

@ -109,14 +151,33 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
    page = alloc_large_page(size);
  }

  if (page == NULL) {
    // Try allocate potentially oversized page
    ZPage* const oversized = alloc_oversized_page(size);
    if (oversized != NULL) {
      if (size < oversized->size()) {
        // Split oversized page
        page = oversized->split(type, size);

        // Cache remainder
        free_page_inner(oversized);
      } else {
        // Re-type correctly sized page
        page = oversized->retype(type);
      }
    }
  }

  if (page != NULL) {
    _available -= page->size();
  } else {
    ZStatInc(ZCounterPageCacheMiss);
  }

  return page;
}
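
The oversized fallback is what lets the cache satisfy a request even when no page of the exact type and size is present: a strictly larger page is split (and the remainder recached), while a page of exactly the requested size is simply retyped. A small illustration of that boundary (standalone, not HotSpot code):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t requested = 2 * 1024 * 1024;               // a 2M request
  const size_t found[] = {2 * 1024 * 1024, 8 * 1024 * 1024};
  for (size_t size : found) {
    if (requested < size) {
      // Strictly larger: split, recache the remainder
      std::printf("%zuM page: split off %zuM, recache %zuM\n",
                  size >> 20, requested >> 20, (size - requested) >> 20);
    } else {
      // Exact size: retype in place, no split needed
      std::printf("%zuM page: retype in place\n", size >> 20);
    }
  }
  return 0;
}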

void ZPageCache::free_page(ZPage* page) {
void ZPageCache::free_page_inner(ZPage* page) {
  const uint8_t type = page->type();
  if (type == ZPageTypeSmall) {
    _small.get(page->numa_id()).insert_first(page);

@ -125,7 +186,10 @@ void ZPageCache::free_page(ZPage* page) {
  } else {
    _large.insert_first(page);
  }
}

void ZPageCache::free_page(ZPage* page) {
  free_page_inner(page);
  _available += page->size();
}

@ -30,7 +30,13 @@
#include "memory/allocation.hpp"

class ZPageCacheFlushClosure : public StackObj {
protected:
  const size_t _requested;
  size_t _flushed;

public:
  ZPageCacheFlushClosure(size_t requested);
  size_t overflushed() const;
  virtual bool do_page(const ZPage* page) = 0;
};

@ -45,6 +51,12 @@ private:
  ZPage* alloc_medium_page();
  ZPage* alloc_large_page(size_t size);

  ZPage* alloc_oversized_medium_page(size_t size);
  ZPage* alloc_oversized_large_page(size_t size);
  ZPage* alloc_oversized_page(size_t size);

  void free_page_inner(ZPage* page);

  bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
  void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
  void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to);

@ -58,6 +70,8 @@ public:
  void free_page(ZPage* page);

  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);

  template <typename Closure> void pages_do(Closure* cl) const;
};

#endif // SHARE_GC_Z_ZPAGECACHE_HPP

@ -24,10 +24,35 @@
#ifndef SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
#define SHARE_GC_Z_ZPAGECACHE_INLINE_HPP

#include "gc/z/zList.inline.hpp"
#include "gc/z/zPageCache.hpp"

inline size_t ZPageCache::available() const {
  return _available;
}

template <typename Closure>
inline void ZPageCache::pages_do(Closure* cl) const {
  // Small
  ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
  for (const ZList<ZPage>* list; iter_numa.next(&list);) {
    ZListIterator<ZPage> iter_small(list);
    for (ZPage* page; iter_small.next(&page);) {
      cl->do_page(page);
    }
  }

  // Medium
  ZListIterator<ZPage> iter_medium(&_medium);
  for (ZPage* page; iter_medium.next(&page);) {
    cl->do_page(page);
  }

  // Large
  ZListIterator<ZPage> iter_large(&_large);
  for (ZPage* page; iter_large.next(&page);) {
    cl->do_page(page);
  }
}

#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP

@ -33,18 +33,42 @@ ZPhysicalMemory::ZPhysicalMemory() :
    _nsegments(0),
    _segments(NULL) {}

ZPhysicalMemory::ZPhysicalMemory(size_t size) :
    _nsegments(0),
    _segments(NULL) {
  add_segment(ZPhysicalMemorySegment(0, size));
}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _nsegments(0),
    _segments(NULL) {
  add_segment(segment);
}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
    _nsegments(0),
    _segments(NULL) {

  // Copy segments
  for (size_t i = 0; i < pmem.nsegments(); i++) {
    add_segment(pmem.segment(i));
  }
}

const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  // Free segments
  delete [] _segments;
  _segments = NULL;
  _nsegments = 0;

  // Copy segments
  for (size_t i = 0; i < pmem.nsegments(); i++) {
    add_segment(pmem.segment(i));
  }

  return *this;
}

ZPhysicalMemory::~ZPhysicalMemory() {
  delete [] _segments;
  _segments = NULL;
  _nsegments = 0;
}

size_t ZPhysicalMemory::size() const {
  size_t size = 0;

@ -55,134 +79,114 @@ size_t ZPhysicalMemory::size() const {
  return size;
}

void ZPhysicalMemory::add_segment(ZPhysicalMemorySegment segment) {
void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
  // Try merge with last segment
  if (_nsegments > 0) {
    ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
    assert(last.end() <= segment.start(), "Segments added out of order");
    if (last.end() == segment.start()) {
      // Merge
      last.expand(segment.size());
      last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
      return;
    }
  }

  // Make room for a new segment
  const size_t size = sizeof(ZPhysicalMemorySegment) * (_nsegments + 1);
  _segments = (ZPhysicalMemorySegment*)ReallocateHeap((char*)_segments, size, mtGC);
  // Resize array
  ZPhysicalMemorySegment* const old_segments = _segments;
  _segments = new ZPhysicalMemorySegment[_nsegments + 1];
  for (size_t i = 0; i < _nsegments; i++) {
    _segments[i] = old_segments[i];
  }
  delete [] old_segments;

  // Add new segment
  _segments[_nsegments] = segment;
  _nsegments++;
}

ZPhysicalMemory ZPhysicalMemory::split(size_t split_size) {
  // Only splitting of single-segment instances has been implemented.
  assert(nsegments() == 1, "Can only have one segment");
  assert(split_size <= size(), "Invalid size");
  return ZPhysicalMemory(_segments[0].split(split_size));
}
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  size_t nsegments = 0;

void ZPhysicalMemory::clear() {
  if (_segments != NULL) {
    FreeHeap(_segments);
    _segments = NULL;
    _nsegments = 0;
  for (size_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
      }
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity),
    _max_capacity(max_capacity),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used(0) {}
  _nsegments = nsegments;

  return pmem;
}
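
Unlike the old single-segment split(), the reworked version walks the whole segment list, transferring whole segments until the boundary falls inside one, which is then cut in two. A standalone sketch of the same carving logic over a vector of (start, size) segments (illustrative types and names):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Segment {
  uintptr_t start;
  size_t size;
};

// Illustrative only: move the first 'size' bytes of a segment list into
// a new list, splitting the segment that straddles the boundary.
std::vector<Segment> split(std::vector<Segment>& segs, size_t size) {
  std::vector<Segment> taken;
  std::vector<Segment> kept;
  size_t have = 0;
  for (const Segment& s : segs) {
    if (have < size) {
      if (have + s.size <= size) {
        taken.push_back(s);                            // transfer segment
        have += s.size;
      } else {
        const size_t cut = size - have;
        taken.push_back({s.start, cut});               // split segment
        kept.push_back({s.start + cut, s.size - cut}); // tail stays behind
        have = size;
      }
    } else {
      kept.push_back(s);                               // keep segment
    }
  }
  segs = kept;
  return taken;
}

int main() {
  std::vector<Segment> segs = {{0x0, 0x200000}, {0x400000, 0x200000}};
  const std::vector<Segment> out = split(segs, 0x300000);
  std::printf("taken=%zu segments, kept=%zu segments\n", out.size(), segs.size());
  return 0;
}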
|
||||
|
||||
bool ZPhysicalMemoryManager::is_initialized() const {
|
||||
return _backing.is_initialized();
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::try_ensure_unused_capacity(size_t size) {
|
||||
const size_t unused = unused_capacity();
|
||||
if (unused >= size) {
|
||||
// Don't try to expand, enough unused capacity available
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t current_max = current_max_capacity();
|
||||
if (_capacity == current_max) {
|
||||
// Don't try to expand, current max capacity reached
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to expand
|
||||
const size_t old_capacity = capacity();
|
||||
const size_t new_capacity = MIN2(old_capacity + size - unused, current_max);
|
||||
_capacity = _backing.try_expand(old_capacity, new_capacity);
|
||||
|
||||
if (_capacity != new_capacity) {
|
||||
// Failed, or partly failed, to expand
|
||||
log_error(gc, init)("Not enough space available on the backing filesystem to hold the current max");
|
||||
log_error(gc, init)("Java heap size (" SIZE_FORMAT "M). Forcefully lowering max Java heap size to "
|
||||
SIZE_FORMAT "M (%.0lf%%).", current_max / M, _capacity / M,
|
||||
percent_of(_capacity, current_max));
|
||||
|
||||
// Adjust current max capacity to avoid further expand attempts
|
||||
_current_max_capacity = _capacity;
|
||||
}
|
||||
void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
|
||||
_backing.warn_commit_limits(max);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::nmt_commit(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
bool ZPhysicalMemoryManager::supports_uncommit() {
|
||||
return _backing.supports_uncommit();
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
const uintptr_t addr = _backing.nmt_address(offset);
|
||||
const size_t size = pmem.size();
|
||||
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||
const uintptr_t addr = _backing.nmt_address(offset);
|
||||
const size_t size = pmem.size();
|
||||
|
||||
Tracker tracker(Tracker::uncommit);
|
||||
tracker.record((address)addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
|
||||
if (unused_capacity() < size) {
|
||||
// Not enough memory available
|
||||
return ZPhysicalMemory();
|
||||
}
|
||||
size_t ZPhysicalMemoryManager::commit(size_t size) {
|
||||
return _backing.commit(size);
|
||||
}
|
||||
|
||||
_used += size;
|
||||
size_t ZPhysicalMemoryManager::uncommit(size_t size) {
|
||||
return _backing.uncommit(size);
|
||||
}
|
||||
|
||||
ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
|
||||
return _backing.alloc(size);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::free(ZPhysicalMemory pmem) {
|
||||
void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
|
||||
_backing.free(pmem);
|
||||
_used -= pmem.size();
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::map(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
// Map page
|
||||
void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
_backing.map(pmem, offset);
|
||||
|
||||
// Update native memory tracker
|
||||
nmt_commit(pmem, offset);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::unmap(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
// Update native memory tracker
|
||||
void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
nmt_uncommit(pmem, offset);
|
||||
|
||||
// Unmap page
|
||||
_backing.unmap(pmem, offset);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::debug_map(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
_backing.debug_map(pmem, offset);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) {
|
||||
void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
|
||||
_backing.debug_unmap(pmem, offset);
|
||||
}
|
||||
|
@ -27,20 +27,18 @@
#include "memory/allocation.hpp"
#include OS_CPU_HEADER(gc/z/zPhysicalMemoryBacking)

class ZPhysicalMemorySegment {
class ZPhysicalMemorySegment : public CHeapObj<mtGC> {
private:
uintptr_t _start;
uintptr_t _end;

public:
ZPhysicalMemorySegment();
ZPhysicalMemorySegment(uintptr_t start, size_t size);

uintptr_t start() const;
uintptr_t end() const;
size_t size() const;

void expand(size_t size);
ZPhysicalMemorySegment split(size_t size);
};

class ZPhysicalMemory {
@ -50,53 +48,45 @@ private:

public:
ZPhysicalMemory();
ZPhysicalMemory(size_t size);
ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
ZPhysicalMemory(const ZPhysicalMemory& pmem);
const ZPhysicalMemory& operator=(const ZPhysicalMemory& pmem);
~ZPhysicalMemory();

bool is_null() const;
size_t size() const;

size_t nsegments() const;
ZPhysicalMemorySegment segment(size_t index) const;
void add_segment(ZPhysicalMemorySegment segment);
const ZPhysicalMemorySegment& segment(size_t index) const;
void add_segment(const ZPhysicalMemorySegment& segment);

ZPhysicalMemory split(size_t size);
void clear();
};

class ZPhysicalMemoryManager {
friend class VMStructs;

private:
ZPhysicalMemoryBacking _backing;
const size_t _max_capacity;
size_t _current_max_capacity;
size_t _capacity;
size_t _used;

void nmt_commit(ZPhysicalMemory pmem, uintptr_t offset);
void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset);
void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;

public:
ZPhysicalMemoryManager(size_t max_capacity);

bool is_initialized() const;

size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t unused_capacity() const;
void warn_commit_limits(size_t max) const;
bool supports_uncommit();

void try_ensure_unused_capacity(size_t size);
size_t commit(size_t size);
size_t uncommit(size_t size);

ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);
void free(const ZPhysicalMemory& pmem);

void map(ZPhysicalMemory pmem, uintptr_t offset);
void unmap(ZPhysicalMemory pmem, uintptr_t offset);
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;

void debug_map(ZPhysicalMemory pmem, uintptr_t offset);
void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset);
void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
};

#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
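The header alone does not show what add_segment() does with adjacent segments, but the new gtest near the bottom of this change expects seven contiguous one-unit segments to collapse into a single seven-unit segment (nsegments() == 1), i.e. a segment that starts exactly at the previous segment's end is merged via something like expand() rather than stored separately. A sketch of that coalescing rule under that assumption, with a simplified Segment and std::vector in place of the real CHeapObj-allocated array:

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for ZPhysicalMemorySegment.
struct Segment {
  uintptr_t start;
  uintptr_t end;
  size_t size() const { return end - start; }
};

// Coalescing add_segment(): merge with the last stored segment when
// contiguous. This mirrors the gtest expectations, not the actual
// HotSpot implementation.
struct PhysicalMemory {
  std::vector<Segment> segments;

  void add_segment(const Segment& seg) {
    if (!segments.empty() && segments.back().end == seg.start) {
      segments.back().end = seg.end;  // expand the last segment in place
    } else {
      segments.push_back(seg);        // non-contiguous: store separately
    }
  }
};

int main() {
  PhysicalMemory pmem;
  for (uintptr_t i = 0; i < 7; i++) {
    pmem.add_segment(Segment{i, i + 1});  // seg0..seg6, contiguous
  }
  assert(pmem.segments.size() == 1);      // matches EXPECT_EQ(nsegments(), 1u)
  assert(pmem.segments[0].size() == 7);   // matches EXPECT_EQ(segment(0).size(), 7u)
  return 0;
}

Non-contiguous additions (the gaps in pmem2 through pmem4 in the test) fall through to the push_back branch, which is why their segment counts match the number of contiguous runs.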
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,10 @@
#include "gc/z/zPhysicalMemory.hpp"
#include "utilities/debug.hpp"

inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() :
_start(UINTPTR_MAX),
_end(UINTPTR_MAX) {}

inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) :
_start(start),
_end(start + size) {}
@ -40,18 +44,7 @@ inline uintptr_t ZPhysicalMemorySegment::end() const {
}

inline size_t ZPhysicalMemorySegment::size() const {
return end() - start();
}

inline void ZPhysicalMemorySegment::expand(size_t size) {
_end += size;
}

inline ZPhysicalMemorySegment ZPhysicalMemorySegment::split(size_t split_size) {
assert(split_size <= size(), "Invalid size");
ZPhysicalMemorySegment segment(_start, split_size);
_start += split_size;
return segment;
return _end - _start;
}

inline bool ZPhysicalMemory::is_null() const {
@ -62,25 +55,9 @@ inline size_t ZPhysicalMemory::nsegments() const {
return _nsegments;
}

inline ZPhysicalMemorySegment ZPhysicalMemory::segment(size_t index) const {
inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(size_t index) const {
assert(index < _nsegments, "Invalid segment index");
return _segments[index];
}

inline size_t ZPhysicalMemoryManager::max_capacity() const {
return _max_capacity;
}

inline size_t ZPhysicalMemoryManager::current_max_capacity() const {
return _current_max_capacity;
}

inline size_t ZPhysicalMemoryManager::capacity() const {
return _capacity;
}

inline size_t ZPhysicalMemoryManager::unused_capacity() const {
return _capacity - _used;
}

#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
@ -1,87 +0,0 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

#include "precompiled.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPreMappedMemory.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "logging/log.hpp"

ZPreMappedMemory::ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryManager &pmm, size_t size) :
_vmem(),
_pmem(),
_initialized(false) {
if (!vmm.is_initialized() || !pmm.is_initialized()) {
// Not initialized
return;
}

// Pre-mapping and pre-touching memory can take a long time. Log a message
// to help the user understand why the JVM might seem slow to start.
log_info(gc, init)("Pre-touching: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
log_info(gc, init)("Pre-mapping: " SIZE_FORMAT "M", size / M);

if (size > 0) {
_pmem = pmm.alloc(size);
if (_pmem.is_null()) {
// Out of memory
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate physical memory)");
return;
}

_vmem = vmm.alloc(size, true /* alloc_from_front */);
if (_vmem.is_null()) {
// Out of address space
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate virtual memory)");
pmm.free(_pmem);
return;
}

// Map physical memory
pmm.map(_pmem, _vmem.start());
}

_initialized = true;
}

ZPage* ZPreMappedMemory::alloc_page(uint8_t type, size_t size) {
if (size > available()) {
// Not enough pre-mapped memory
return NULL;
}

// Take a chunk of the pre-mapped memory
const ZPhysicalMemory pmem = _pmem.split(size);
const ZVirtualMemory vmem = _vmem.split(size);

ZPage* const page = new ZPage(type, vmem, pmem);
page->set_pre_mapped();

return page;
}

void ZPreMappedMemory::clear() {
assert(_pmem.is_null(), "Should be detached");
_vmem.clear();
}
@ -1,54 +0,0 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

#ifndef SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP
#define SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP

#include "gc/z/zPhysicalMemory.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "memory/allocation.hpp"

class ZPage;

class ZPreMappedMemory {
private:
ZVirtualMemory _vmem;
ZPhysicalMemory _pmem;
bool _initialized;

public:
ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryManager &pmm, size_t size);

bool is_initialized() const;

ZPhysicalMemory& physical_memory();
const ZVirtualMemory& virtual_memory() const;

size_t available() const;

ZPage* alloc_page(uint8_t type, size_t size);

void clear();
};

#endif // SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP
74
src/hotspot/share/gc/z/zUncommitter.cpp
Normal file
@ -0,0 +1,74 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

#include "precompiled.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zUncommitter.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

ZUncommitter::ZUncommitter() :
_monitor(Monitor::leaf, "ZUncommitter", false, Monitor::_safepoint_check_never),
_stop(false) {
set_name("ZUncommitter");
create_and_start();
}

bool ZUncommitter::idle(uint64_t timeout) {
// Idle for at least one second
const uint64_t expires = os::elapsedTime() + MAX2(timeout, 1ul);

for (;;) {
// We might wake up spuriously from wait, so always recalculate
// the timeout after a wakeup to see if we need to wait again.
const uint64_t now = os::elapsedTime();
const uint64_t remaining = expires - MIN2(expires, now);

MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
if (remaining > 0 && !_stop) {
ml.wait(remaining * MILLIUNITS);
} else {
return !_stop;
}
}
}

void ZUncommitter::run_service() {
for (;;) {
// Try uncommit unused memory
const uint64_t timeout = ZHeap::heap()->uncommit(ZUncommitDelay);

log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);

// Idle until next attempt
if (!idle(timeout)) {
return;
}
}
}

void ZUncommitter::stop_service() {
MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
_stop = true;
ml.notify();
}
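A subtle point in idle(): remaining is computed as expires - MIN2(expires, now) instead of a plain expires - now, so when the deadline has already passed, the unsigned subtraction clamps to zero rather than wrapping around to a huge value (which would put the thread to sleep essentially forever). The idiom in isolation, as a standalone check:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t expires = 100;

  // Deadline still ahead: normal remaining time.
  assert(expires - std::min(expires, uint64_t(40)) == 60);

  // Deadline reached or passed: clamps to 0. A plain "expires - now"
  // would wrap around to a huge uint64_t value here.
  assert(expires - std::min(expires, uint64_t(100)) == 0);
  assert(expires - std::min(expires, uint64_t(250)) == 0);

  return 0;
}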
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,25 +21,25 @@
* questions.
*/

#ifndef SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
#define SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
#ifndef SHARE_GC_Z_ZUNCOMMITTER_HPP
#define SHARE_GC_Z_ZUNCOMMITTER_HPP

#include "gc/z/zPreMappedMemory.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"

inline bool ZPreMappedMemory::is_initialized() const {
return _initialized;
}
class ZUncommitter : public ConcurrentGCThread {
private:
Monitor _monitor;
bool _stop;

inline ZPhysicalMemory& ZPreMappedMemory::physical_memory() {
return _pmem;
}
bool idle(uint64_t timeout);

inline const ZVirtualMemory& ZPreMappedMemory::virtual_memory() const {
return _vmem;
}
protected:
virtual void run_service();
virtual void stop_service();

inline size_t ZPreMappedMemory::available() const {
return _vmem.size();
}
public:
ZUncommitter();
};

#endif // SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
#endif // SHARE_GC_Z_ZUNCOMMITTER_HPP
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,6 +72,6 @@ ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front)
return ZVirtualMemory(start, size);
}

void ZVirtualMemoryManager::free(ZVirtualMemory vmem) {
void ZVirtualMemoryManager::free(const ZVirtualMemory& vmem) {
_manager.free(vmem.start(), vmem.size());
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,6 @@
#define SHARE_GC_Z_ZVIRTUALMEMORY_HPP

#include "gc/z/zMemory.hpp"
#include "memory/allocation.hpp"

class ZVirtualMemory {
friend class VMStructs;
@ -42,8 +41,8 @@ public:
uintptr_t start() const;
uintptr_t end() const;
size_t size() const;

ZVirtualMemory split(size_t size);
void clear();
};

class ZVirtualMemoryManager {
@ -60,7 +59,7 @@ public:
bool is_initialized() const;

ZVirtualMemory alloc(size_t size, bool alloc_from_front = false);
void free(ZVirtualMemory vmem);
void free(const ZVirtualMemory& vmem);
};

#endif // SHARE_GC_Z_ZVIRTUALMEMORY_HPP
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,16 +51,9 @@ inline size_t ZVirtualMemory::size() const {
return _end - _start;
}

inline ZVirtualMemory ZVirtualMemory::split(size_t split_size) {
assert(split_size <= size(), "precondition");
ZVirtualMemory mem(_start, split_size);
_start += split_size;
return mem;
}

inline void ZVirtualMemory::clear() {
_start = UINTPTR_MAX;
_end = UINTPTR_MAX;
inline ZVirtualMemory ZVirtualMemory::split(size_t size) {
_start += size;
return ZVirtualMemory(_start - size, size);
}

#endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
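For any size within range, the rewritten split() behaves exactly like the version it replaces: it hands back the leading chunk as a new range and advances the receiver past it; only the named temporary and the precondition assert are folded away. A standalone sketch of those semantics on a plain range type (not the HotSpot class), mirroring the values used by the gtest further down:

#include <cassert>
#include <cstdint>

// Plain stand-in for ZVirtualMemory: a [start, end) range.
struct Range {
  uintptr_t start;
  uintptr_t end;

  size_t length() const { return end - start; }

  // New-style split: take the first `n` bytes, keep the remainder.
  Range split(size_t n) {
    start += n;
    return Range{start - n, start};
  }
};

int main() {
  Range r{0, 10};

  Range a = r.split(0);
  assert(a.length() == 0 && r.length() == 10);  // zero-sized split is a no-op

  Range b = r.split(5);
  assert(b.length() == 5 && r.length() == 5);   // first half split off

  Range c = r.split(5);
  assert(c.length() == 5 && r.length() == 0);   // receiver fully consumed

  return 0;
}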
@ -56,6 +56,13 @@
experimental(uint, ZCollectionInterval, 0, \
"Force GC at a fixed time interval (in seconds)") \
\
experimental(bool, ZUncommit, true, \
"Uncommit unused memory") \
\
experimental(uintx, ZUncommitDelay, 5 * 60, \
"Uncommit memory if it has been unused for the specified " \
"amount of time (in seconds)") \
\
diagnostic(uint, ZStatisticsInterval, 10, \
"Time between statistics print outs (in seconds)") \
range(1, (uint)-1) \
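ZUncommit and ZUncommitDelay are declared experimental, so they can only be set together with -XX:+UnlockExperimentalVMOptions; the new jtreg test below passes exactly that combination on every @run line. With the defaults above, uncommit is enabled and memory must sit unused for 5 * 60 = 300 seconds before it is returned to the OS. A run that shortens the delay to one minute might look like this (MyApp is a placeholder for the application's main class):

java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZUncommitDelay=60 MyApp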
@ -27,8 +27,6 @@ package sun.jvm.hotspot.gc.z;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
@ -37,7 +35,8 @@ import sun.jvm.hotspot.types.TypeDataBase;

public class ZPageAllocator extends VMObject {

private static long physicalFieldOffset;
private static CIntegerField maxCapacityField;
private static CIntegerField capacityField;
private static CIntegerField usedField;

static {
@ -47,21 +46,17 @@ public class ZPageAllocator extends VMObject {
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("ZPageAllocator");

physicalFieldOffset = type.getAddressField("_physical").getOffset();
maxCapacityField = type.getCIntegerField("_max_capacity");
capacityField = type.getCIntegerField("_capacity");
usedField = type.getCIntegerField("_used");
}

private ZPhysicalMemoryManager physical() {
Address physicalAddr = addr.addOffsetTo(physicalFieldOffset);
return (ZPhysicalMemoryManager)VMObjectFactory.newObject(ZPhysicalMemoryManager.class, physicalAddr);
}

public long maxCapacity() {
return physical().maxCapacity();
return maxCapacityField.getValue(addr);
}

public long capacity() {
return physical().capacity();
return capacityField.getValue(addr);
}

public long used() {
@ -1,64 +0,0 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

package sun.jvm.hotspot.gc.z;

import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

// Mirror class for ZPhysicalMemoryManager

public class ZPhysicalMemoryManager extends VMObject {

private static CIntegerField capacityField;

private static CIntegerField maxCapacityField;

static {
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
}

private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("ZPhysicalMemoryManager");

capacityField = type.getCIntegerField("_capacity");
maxCapacityField = type.getCIntegerField("_max_capacity");
}

public long capacity() {
return capacityField.getValue(addr);
}

public long maxCapacity() {
return maxCapacityField.getValue(addr);
}

public ZPhysicalMemoryManager(Address addr) {
super(addr);
}
}
@ -169,9 +169,6 @@ public:

// Teardown forwarding
ZForwarding::destroy(forwarding);

// Teardown page
page.physical_memory().clear();
}

// Run the given function with a few different input values.
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,59 +22,121 @@
*/

#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "utilities/debug.hpp"
#include "unittest.hpp"

#if defined(AMD64)
TEST(ZPhysicalMemoryTest, copy) {
const ZPhysicalMemorySegment seg0(0, 100);
const ZPhysicalMemorySegment seg1(200, 100);

TEST(ZPhysicalMemorySegmentTest, split) {
ZPhysicalMemorySegment seg(0, 10 * ZGranuleSize);
ZPhysicalMemory pmem0;
pmem0.add_segment(seg0);
EXPECT_EQ(pmem0.nsegments(), 1u);
EXPECT_EQ(pmem0.segment(0).size(), 100u);

ZPhysicalMemorySegment seg_split0 = seg.split(0 * ZGranuleSize);
EXPECT_EQ(seg_split0.size(), 0 * ZGranuleSize);
EXPECT_EQ(       seg.size(), 10 * ZGranuleSize);
ZPhysicalMemory pmem1;
pmem1.add_segment(seg0);
pmem1.add_segment(seg1);
EXPECT_EQ(pmem1.nsegments(), 2u);
EXPECT_EQ(pmem1.segment(0).size(), 100u);
EXPECT_EQ(pmem1.segment(1).size(), 100u);

ZPhysicalMemorySegment seg_split1 = seg.split(5 * ZGranuleSize);
EXPECT_EQ(seg_split1.size(), 5 * ZGranuleSize);
EXPECT_EQ(       seg.size(), 5 * ZGranuleSize);
ZPhysicalMemory pmem2(pmem0);
EXPECT_EQ(pmem2.nsegments(), 1u);
EXPECT_EQ(pmem2.segment(0).size(), 100u);

ZPhysicalMemorySegment seg_split2 = seg.split(5 * ZGranuleSize);
EXPECT_EQ(seg_split2.size(), 5 * ZGranuleSize);
EXPECT_EQ(       seg.size(), 0 * ZGranuleSize);
pmem2 = pmem1;
EXPECT_EQ(pmem2.nsegments(), 2u);
EXPECT_EQ(pmem2.segment(0).size(), 100u);
EXPECT_EQ(pmem2.segment(1).size(), 100u);
}

ZPhysicalMemorySegment seg_split3 = seg.split(0 * ZGranuleSize);
EXPECT_EQ(seg_split3.size(), 0 * ZGranuleSize);
EXPECT_EQ(       seg.size(), 0 * ZGranuleSize);
TEST(ZPhysicalMemoryTest, segments) {
const ZPhysicalMemorySegment seg0(0, 1);
const ZPhysicalMemorySegment seg1(1, 1);
const ZPhysicalMemorySegment seg2(2, 1);
const ZPhysicalMemorySegment seg3(3, 1);
const ZPhysicalMemorySegment seg4(4, 1);
const ZPhysicalMemorySegment seg5(5, 1);
const ZPhysicalMemorySegment seg6(6, 1);

ZPhysicalMemory pmem0;
EXPECT_EQ(pmem0.nsegments(), 0u);
EXPECT_EQ(pmem0.is_null(), true);

ZPhysicalMemory pmem1;
pmem1.add_segment(seg0);
pmem1.add_segment(seg1);
pmem1.add_segment(seg2);
pmem1.add_segment(seg3);
pmem1.add_segment(seg4);
pmem1.add_segment(seg5);
pmem1.add_segment(seg6);
EXPECT_EQ(pmem1.nsegments(), 1u);
EXPECT_EQ(pmem1.segment(0).size(), 7u);
EXPECT_EQ(pmem1.is_null(), false);

ZPhysicalMemory pmem2;
pmem2.add_segment(seg0);
pmem2.add_segment(seg1);
pmem2.add_segment(seg2);
pmem2.add_segment(seg4);
pmem2.add_segment(seg5);
pmem2.add_segment(seg6);
EXPECT_EQ(pmem2.nsegments(), 2u);
EXPECT_EQ(pmem2.segment(0).size(), 3u);
EXPECT_EQ(pmem2.segment(1).size(), 3u);
EXPECT_EQ(pmem2.is_null(), false);

ZPhysicalMemory pmem3;
pmem3.add_segment(seg0);
pmem3.add_segment(seg2);
pmem3.add_segment(seg3);
pmem3.add_segment(seg4);
pmem3.add_segment(seg6);
EXPECT_EQ(pmem3.nsegments(), 3u);
EXPECT_EQ(pmem3.segment(0).size(), 1u);
EXPECT_EQ(pmem3.segment(1).size(), 3u);
EXPECT_EQ(pmem3.segment(2).size(), 1u);
EXPECT_EQ(pmem3.is_null(), false);

ZPhysicalMemory pmem4;
pmem4.add_segment(seg0);
pmem4.add_segment(seg2);
pmem4.add_segment(seg4);
pmem4.add_segment(seg6);
EXPECT_EQ(pmem4.nsegments(), 4u);
EXPECT_EQ(pmem4.segment(0).size(), 1u);
EXPECT_EQ(pmem4.segment(1).size(), 1u);
EXPECT_EQ(pmem4.segment(2).size(), 1u);
EXPECT_EQ(pmem4.segment(3).size(), 1u);
EXPECT_EQ(pmem4.is_null(), false);
}

TEST(ZPhysicalMemoryTest, split) {
ZPhysicalMemoryManager pmem_manager(10 * ZGranuleSize);
ZPhysicalMemory pmem;

pmem_manager.try_ensure_unused_capacity(10 * ZGranuleSize);
EXPECT_EQ(pmem_manager.unused_capacity(), 10 * ZGranuleSize);
pmem.add_segment(ZPhysicalMemorySegment(0, 10));
pmem.add_segment(ZPhysicalMemorySegment(10, 10));
pmem.add_segment(ZPhysicalMemorySegment(30, 10));
EXPECT_EQ(pmem.nsegments(), 2u);
EXPECT_EQ(pmem.size(), 30u);

ZPhysicalMemory pmem = pmem_manager.alloc(8 * ZGranuleSize);
EXPECT_EQ(pmem.nsegments(), 1u) << "wrong number of segments";
ZPhysicalMemory pmem0 = pmem.split(1);
EXPECT_EQ(pmem0.nsegments(), 1u);
EXPECT_EQ(pmem0.size(), 1u);
EXPECT_EQ(pmem.nsegments(), 2u);
EXPECT_EQ(pmem.size(), 29u);

ZPhysicalMemory split0_pmem = pmem.split(ZGranuleSize);
EXPECT_EQ(split0_pmem.nsegments(), 1u);
EXPECT_EQ(       pmem.nsegments(), 1u);
EXPECT_EQ(split0_pmem.size(), 1 * ZGranuleSize);
EXPECT_EQ(       pmem.size(), 7 * ZGranuleSize);
ZPhysicalMemory pmem1 = pmem.split(25);
EXPECT_EQ(pmem1.nsegments(), 2u);
EXPECT_EQ(pmem1.size(), 25u);
EXPECT_EQ(pmem.nsegments(), 1u);
EXPECT_EQ(pmem.size(), 4u);

ZPhysicalMemory split1_pmem = pmem.split(2 * ZGranuleSize);
EXPECT_EQ(split1_pmem.nsegments(), 1u);
EXPECT_EQ(       pmem.nsegments(), 1u);
EXPECT_EQ(split1_pmem.size(), 2 * ZGranuleSize);
EXPECT_EQ(       pmem.size(), 5 * ZGranuleSize);

ZPhysicalMemory split2_pmem = pmem.split(5 * ZGranuleSize);
EXPECT_EQ(split2_pmem.nsegments(), 1u);
EXPECT_EQ(       pmem.nsegments(), 1u);
EXPECT_EQ(split2_pmem.size(), 5 * ZGranuleSize);
EXPECT_EQ(       pmem.size(), 0 * ZGranuleSize);
ZPhysicalMemory pmem2 = pmem.split(4);
EXPECT_EQ(pmem2.nsegments(), 1u);
EXPECT_EQ(pmem2.size(), 4u);
EXPECT_EQ(pmem.nsegments(), 0u);
EXPECT_EQ(pmem.size(), 0u);
}

#endif
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,26 +23,23 @@

#include "precompiled.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "utilities/debug.hpp"
#include "unittest.hpp"

TEST(ZVirtualMemory, split) {
const size_t PageSize = 2 * M;
ZVirtualMemory vmem(0, 10);

ZVirtualMemory mem(0, 10 * PageSize);
ZVirtualMemory vmem0 = vmem.split(0);
EXPECT_EQ(vmem0.size(), 0u);
EXPECT_EQ(vmem.size(), 10u);

ZVirtualMemory mem_split0 = mem.split(0 * PageSize);
EXPECT_EQ(mem_split0.size(), 0 * PageSize);
EXPECT_EQ(       mem.size(), 10 * PageSize);
ZVirtualMemory vmem1 = vmem.split(5);
EXPECT_EQ(vmem1.size(), 5u);
EXPECT_EQ(vmem.size(), 5u);

ZVirtualMemory mem_split1 = mem.split(5u * PageSize);
EXPECT_EQ(mem_split1.size(), 5 * PageSize);
EXPECT_EQ(       mem.size(), 5 * PageSize);
ZVirtualMemory vmem2 = vmem.split(5);
EXPECT_EQ(vmem2.size(), 5u);
EXPECT_EQ(vmem.size(), 0u);

ZVirtualMemory mem_split2 = mem.split(5u * PageSize);
EXPECT_EQ(mem_split2.size(), 5 * PageSize);
EXPECT_EQ(       mem.size(), 0 * PageSize);

ZVirtualMemory mem_split3 = mem.split(0 * PageSize);
EXPECT_EQ(mem_split3.size(), 0 * PageSize);
ZVirtualMemory vmem3 = vmem.split(0);
EXPECT_EQ(vmem3.size(), 0u);
}
@ -37,6 +37,5 @@ serviceability/sa/ClhsdbSource.java 8220624 generic-
serviceability/sa/TestClhsdbJstackLock.java 8220624 generic-all
serviceability/sa/TestHeapDumpForInvokeDynamic.java 8220624 generic-all
serviceability/sa/TestHeapDumpForLargeArray.java 8220624 generic-all
serviceability/sa/TestUniverse.java 8220624 generic-all
serviceability/sa/TestJmapCore.java 8220624 generic-all
serviceability/sa/TestJmapCoreMetaspace.java 8219443 generic-all
129
test/hotspot/jtreg/gc/z/TestUncommit.java
Normal file
@ -0,0 +1,129 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

package gc.z;

/*
* @test TestUncommit
* @requires vm.gc.Z
* @summary Test ZGC uncommit unused memory
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 3
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1
*/

import java.util.ArrayList;

public class TestUncommit {
private static final int delay = 10; // seconds
private static final int allocSize = 200 * 1024 * 1024; // 200M
private static final int smallObjectSize = 4 * 1024; // 4K
private static final int mediumObjectSize = 2 * 1024 * 1024; // 2M
private static final int largeObjectSize = allocSize;

private static volatile ArrayList<byte[]> keepAlive;

private static long capacity() {
return Runtime.getRuntime().totalMemory();
}

private static void allocate(int objectSize) {
keepAlive = new ArrayList<>();
for (int i = 0; i < allocSize; i+= objectSize) {
keepAlive.add(new byte[objectSize]);
}
}

private static void reclaim() {
keepAlive = null;
System.gc();
}

private static void test(boolean enabled, int objectSize) throws Exception {
final var beforeAlloc = capacity();

// Allocate memory
allocate(objectSize);

final var afterAlloc = capacity();

// Reclaim memory
reclaim();

// Wait shorter than the uncommit delay
Thread.sleep(delay * 1000 / 2);

final var beforeUncommit = capacity();

// Wait longer than the uncommit delay
Thread.sleep(delay * 1000);

final var afterUncommit = capacity();

System.out.println(" Uncommit Enabled: " + enabled);
System.out.println(" Uncommit Delay: " + delay);
System.out.println(" Object Size: " + objectSize);
System.out.println(" Alloc Size: " + allocSize);
System.out.println(" Before Alloc: " + beforeAlloc);
System.out.println(" After Alloc: " + afterAlloc);
System.out.println(" Before Uncommit: " + beforeUncommit);
System.out.println(" After Uncommit: " + afterUncommit);
System.out.println();

// Verify
if (enabled) {
if (beforeUncommit == beforeAlloc) {
throw new Exception("Uncommitted too fast");
}

if (afterUncommit >= afterAlloc) {
throw new Exception("Uncommitted too slow");
}

if (afterUncommit < beforeAlloc) {
throw new Exception("Uncommitted too much");
}

if (afterUncommit > beforeAlloc) {
throw new Exception("Uncommitted too little");
}
} else {
if (afterAlloc > beforeUncommit ||
afterAlloc > afterUncommit) {
throw new Exception("Should not uncommit");
}
}
}

public static void main(String[] args) throws Exception {
final boolean enabled = Boolean.parseBoolean(args[0]);
final int iterations = Integer.parseInt(args[1]);

for (int i = 0; i < iterations; i++) {
System.out.println("Iteration " + i);
test(enabled, smallObjectSize);
test(enabled, mediumObjectSize);
test(enabled, largeObjectSize);
}
}
}
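The enabled-path verification pins down both the timing and the amount of the uncommit: capacity must still be at its post-allocation level halfway through the delay, must have dropped once the full delay has elapsed, and must land exactly back on the pre-allocation capacity (the -Xms128M baseline), since both overshoot and undershoot throw.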