8245208: ZGC: Don't hold the ZPageAllocator lock while committing/uncommitting memory

Reviewed-by: eosterlund, stefank

parent cd16b568ce
commit d7e68f375c
@@ -93,11 +93,11 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
   return _initialized;
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Does nothing
 }
 
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
   assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
   assert(is_aligned(length, os::vm_page_size()), "Invalid length");
 
@@ -116,7 +116,7 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
   return true;
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
   // Try to commit the whole region
   if (commit_inner(offset, length)) {
     // Success
@@ -144,7 +144,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
   }
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
   assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
   assert(is_aligned(length, os::vm_page_size()), "Invalid length");
 
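The commit() retry loop is truncated in the hunk above. Its general shape — try the whole range, then halve the probe size until even a granule-sized commit fails — can be sketched standalone. A minimal sketch with a simulated commit limit and an assumed 2M granule; the helper names are hypothetical, this is not the JDK code itself:

    #include <cstddef>
    #include <cstdio>

    // Simulate a backing store that can commit at most g_limit bytes in total.
    static const size_t kGranule = 2 * 1024 * 1024; // assumed 2M granule
    static size_t g_limit = 10 * kGranule;
    static size_t g_committed = 0;

    static bool try_commit(size_t /*offset*/, size_t length) {
      if (g_committed + length > g_limit) {
        return false; // commit limit hit
      }
      g_committed += length;
      return true;
    }

    // Commit as much of [offset, offset + length) as possible, front to back,
    // halving the probe size on each failure.
    static size_t commit_best_effort(size_t offset, size_t length) {
      if (try_commit(offset, length)) {
        return length; // whole region committed
      }
      size_t start = offset;
      size_t end = offset + length;
      for (;;) {
        const size_t probe = (end - start) / 2 / kGranule * kGranule;
        if (probe < kGranule) {
          return start - offset; // done, nothing smaller left to try
        }
        if (try_commit(start, probe)) {
          start += probe; // success, try to commit more
        } else {
          end -= probe;   // failure, try to commit less
        }
      }
    }

    int main() {
      const size_t got = commit_best_effort(0, 16 * kGranule);
      printf("committed %zu of 16 granules\n", got / kGranule); // prints 10
      return 0;
    }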
@@ -29,17 +29,17 @@ private:
   uintptr_t _base;
   bool _initialized;
 
-  bool commit_inner(size_t offset, size_t length);
+  bool commit_inner(size_t offset, size_t length) const;
 
 public:
   ZPhysicalMemoryBacking(size_t max_capacity);
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
+  size_t commit(size_t offset, size_t length) const;
+  size_t uncommit(size_t offset, size_t length) const;
 
   void map(uintptr_t addr, size_t size, uintptr_t offset) const;
   void unmap(uintptr_t addr, size_t size) const;
 
@@ -302,7 +302,7 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
   return _initialized;
 }
 
-void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
+void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
   // Note that the available space on a tmpfs or a hugetlbfs filesystem
   // will be zero if no size limit was specified when it was mounted.
   if (_available == 0) {
@@ -316,18 +316,18 @@ void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
   // Warn if the filesystem doesn't currently have enough space available to hold
   // the max heap size. The max heap size will be capped if we later hit this limit
   // when trying to expand the heap.
-  if (_available < max) {
+  if (_available < max_capacity) {
     log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
     log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
-                      "(available", max / M);
+                      "(available", max_capacity / M);
     log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
-                      "size could", _available / M);
-    log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
+                      "size could", _available / M);
+    log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
   }
 }
 
-void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
+void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
   const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
   FILE* const file = fopen(filename, "r");
   if (file == NULL) {
@@ -350,24 +350,24 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
   // However, ZGC tends to create the most mappings and dominate the total count.
   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
-  const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
+  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
   if (actual_max_map_count < required_max_map_count) {
     log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
     log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
-                      max / M, filename);
+                      max_capacity / M, filename);
     log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
-                      "with the current", required_max_map_count, actual_max_map_count);
-    log_warning_p(gc)("limit could lead to a fatal error, due to failure to map memory.");
+                      "with the current", required_max_map_count, actual_max_map_count);
+    log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
  }
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Warn if available space is too low
-  warn_available_space(max);
+  warn_available_space(max_capacity);
 
   // Warn if max map count is too low
-  warn_max_map_count(max);
+  warn_max_map_count(max_capacity);
 }
 
 bool ZPhysicalMemoryBacking::is_tmpfs() const {
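As a worked example of the required_max_map_count formula (assuming the usual 2M ZGranuleSize; the 16G heap is illustrative):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t G = 1024 * 1024 * 1024;
      const size_t granule = 2 * 1024 * 1024;     // ZGranuleSize (2M)
      const size_t max_capacity = 16 * G;         // illustrative 16G max heap
      // 8192 granules, up to 3 mappings each (one per heap view), +20% headroom
      const size_t required = (max_capacity / granule) * 3 * 1.2;
      printf("required max_map_count: %zu\n", required); // 29491
      return 0;
    }

The result, 29491 mappings, fits comfortably under the common Linux default vm.max_map_count of 65530, but larger heaps can exceed it.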
@@ -477,7 +477,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t len
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
   // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
   // since Linux 4.3. When fallocate(2) is not supported we emulate it using
   // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
@@ -491,7 +491,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t
   }
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
   const int mode = 0; // Allocate
   const int res = ZSyscall::fallocate(_fd, mode, offset, length);
   if (res == -1) {
@@ -503,7 +503,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
   // Using compat mode is more efficient when allocating space on hugetlbfs.
   // Note that allocating huge pages this way will only reserve them, and not
   // associate them with segments of the file. We must guarantee that we at
@@ -530,7 +530,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length)
   return fallocate_fill_hole_compat(offset, length);
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
   if (ZLargePages::is_explicit()) {
     // We can only punch hole in pages that have been touched. Non-touched
     // pages are only reserved, and not associated with any specific file
@@ -553,7 +553,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
   // Try first half
   const size_t offset0 = offset;
   const size_t length0 = align_up(length / 2, _block_size);
@@ -574,7 +574,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse
   return 0;
 }
 
-ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
   assert(is_aligned(offset, _block_size), "Invalid offset");
   assert(is_aligned(length, _block_size), "Invalid length");
 
@@ -590,7 +590,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t
   return err;
 }
 
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
   log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
@@ -627,7 +627,7 @@ static int offset_to_node(size_t offset) {
   return mapping->at((int)nindex);
 }
 
-size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
   size_t committed = 0;
 
   // Commit one granule at a time, so that each granule
@@ -652,7 +652,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t len
   return committed;
 }
 
-size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
   // Try to commit the whole region
   if (commit_inner(offset, length)) {
     // Success
@@ -680,7 +680,7 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
   }
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
   if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
     // To get granule-level NUMA interleaving when using non-large pages,
     // we must explicitly interleave the memory at commit/fallocate time.
@@ -690,7 +690,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
   return commit_default(offset, length);
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
   log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
@@ -35,8 +35,8 @@ private:
   size_t _available;
   bool _initialized;
 
-  void warn_available_space(size_t max) const;
-  void warn_max_map_count(size_t max) const;
+  void warn_available_space(size_t max_capacity) const;
+  void warn_max_map_count(size_t max_capacity) const;
 
   int create_mem_fd(const char* name) const;
   int create_file_fd(const char* name) const;
@@ -49,26 +49,26 @@ private:
   ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
   ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
   ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
-  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole(size_t offset, size_t length);
-  ZErrno fallocate_punch_hole(size_t offset, size_t length);
-  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
-  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole(size_t offset, size_t length) const;
+  ZErrno fallocate_punch_hole(size_t offset, size_t length) const;
+  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
+  ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
 
-  bool commit_inner(size_t offset, size_t length);
-  size_t commit_numa_interleaved(size_t offset, size_t length);
-  size_t commit_default(size_t offset, size_t length);
+  bool commit_inner(size_t offset, size_t length) const;
+  size_t commit_numa_interleaved(size_t offset, size_t length) const;
+  size_t commit_default(size_t offset, size_t length) const;
 
 public:
   ZPhysicalMemoryBacking(size_t max_capacity);
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
+  size_t commit(size_t offset, size_t length) const;
+  size_t uncommit(size_t offset, size_t length) const;
 
   void map(uintptr_t addr, size_t size, uintptr_t offset) const;
   void unmap(uintptr_t addr, size_t size) const;
 
@@ -42,7 +42,7 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
   return true;
 }
 
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
   // Does nothing
 }
 
@@ -44,7 +44,7 @@ public:
 
   bool is_initialized() const;
 
-  void warn_commit_limits(size_t max) const;
+  void warn_commit_limits(size_t max_capacity) const;
 
   size_t commit(size_t offset, size_t length);
   size_t uncommit(size_t offset, size_t length);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,8 +77,8 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
   volatile_nonstatic_field(ZPage, _top, uintptr_t) \
   \
   nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
-  nonstatic_field(ZPageAllocator, _capacity, size_t) \
-  nonstatic_field(ZPageAllocator, _used, size_t) \
+  volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \
+  volatile_nonstatic_field(ZPageAllocator, _used, size_t) \
   \
   nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
   \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,20 +31,22 @@
 // Allocation flags layout
 // -----------------------
 //
-//   7    3 2 1 0
-//  +----+-+-+-+-+
-//  |0000|1|1|1|1|
-//  +----+-+-+-+-+
-//  |    | | | |
-//  |    | | | * 0-0 Worker Thread Flag (1-bit)
-//  |    | | |
-//  |    | | * 1-1 Non-Blocking Flag (1-bit)
-//  |    | |
-//  |    | * 2-2 Relocation Flag (1-bit)
-//  |    |
-//  |    * 3-3 No Reserve Flag (1-bit)
+//   7   4 3 2 1 0
+//  +---+-+-+-+-+-+
+//  |000|1|1|1|1|1|
+//  +---+-+-+-+-+-+
+//  |   | | | | |
+//  |   | | | | * 0-0 Worker Thread Flag (1-bit)
+//  |   | | | |
+//  |   | | | * 1-1 Non-Blocking Flag (1-bit)
+//  |   | | |
+//  |   | | * 2-2 Relocation Flag (1-bit)
+//  |   | |
+//  |   | * 3-3 No Reserve Flag (1-bit)
+//  |   |
+//  |   * 4-4 Low Address Flag (1-bit)
 //  |
-//  * 7-4 Unused (4-bits)
+//  * 7-5 Unused (3-bits)
 //
 
 class ZAllocationFlags {
@@ -53,6 +55,7 @@ private:
   typedef ZBitField<uint8_t, bool, 1, 1> field_non_blocking;
   typedef ZBitField<uint8_t, bool, 2, 1> field_relocation;
   typedef ZBitField<uint8_t, bool, 3, 1> field_no_reserve;
+  typedef ZBitField<uint8_t, bool, 4, 1> field_low_address;
 
   uint8_t _flags;
 
@@ -76,6 +79,10 @@ public:
     _flags |= field_no_reserve::encode(true);
   }
 
+  void set_low_address() {
+    _flags |= field_low_address::encode(true);
+  }
+
   bool worker_thread() const {
     return field_worker_thread::decode(_flags);
   }
@@ -91,6 +98,10 @@ public:
   bool no_reserve() const {
     return field_no_reserve::decode(_flags);
   }
+
+  bool low_address() const {
+    return field_low_address::decode(_flags);
+  }
 };
 
 #endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
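The new Low Address flag claims bit 4 of the 8-bit flags value. A minimal standalone sketch of the encode/decode pattern (a simplified stand-in for HotSpot's ZBitField template, not the real one):

    #include <cassert>
    #include <cstdint>

    // One flag bit at a fixed shift within an 8-bit flags value.
    template <int Shift>
    struct BitFlag {
      static uint8_t encode(bool value) { return static_cast<uint8_t>(value) << Shift; }
      static bool decode(uint8_t flags) { return (flags >> Shift) & 1; }
    };

    using WorkerThreadFlag = BitFlag<0>;
    using NonBlockingFlag  = BitFlag<1>;
    using RelocationFlag   = BitFlag<2>;
    using NoReserveFlag    = BitFlag<3>;
    using LowAddressFlag   = BitFlag<4>; // new in this commit

    int main() {
      uint8_t flags = 0;
      flags |= NoReserveFlag::encode(true);
      flags |= LowAddressFlag::encode(true);
      assert(NoReserveFlag::decode(flags));
      assert(LowAddressFlag::decode(flags));
      assert(!RelocationFlag::decode(flags));
      return 0;
    }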
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,18 @@ void ZArguments::initialize() {
     vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
   }
 
+  // Select medium page size so that we can calculate the max reserve
+  ZHeuristics::set_medium_page_size();
+
+  // MinHeapSize/InitialHeapSize must be at least as large as the max reserve
+  const size_t max_reserve = ZHeuristics::max_reserve();
+  if (MinHeapSize < max_reserve) {
+    FLAG_SET_ERGO(MinHeapSize, max_reserve);
+  }
+  if (InitialHeapSize < max_reserve) {
+    FLAG_SET_ERGO(InitialHeapSize, max_reserve);
+  }
+
 #ifdef COMPILER2
   // Enable loop strip mining by default
   if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
@@ -25,6 +25,8 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zNMethod.hpp"
@@ -52,7 +54,6 @@ ZCollectedHeap::ZCollectedHeap() :
     _heap(),
     _director(new ZDirector()),
     _driver(new ZDriver()),
-    _uncommitter(new ZUncommitter()),
     _stat(new ZStat()),
     _runtime_workers() {}
 
@@ -78,11 +79,19 @@ void ZCollectedHeap::initialize_serviceability() {
   _heap.serviceability_initialize();
 }
 
+class ZStopConcurrentGCThreadClosure : public ThreadClosure {
+public:
+  virtual void do_thread(Thread* thread) {
+    if (thread->is_ConcurrentGC_thread() &&
+        !thread->is_GC_task_thread()) {
+      static_cast<ConcurrentGCThread*>(thread)->stop();
+    }
+  }
+};
+
 void ZCollectedHeap::stop() {
-  _director->stop();
-  _driver->stop();
-  _uncommitter->stop();
-  _stat->stop();
+  ZStopConcurrentGCThreadClosure cl;
+  gc_threads_do(&cl);
 }
 
 SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
@@ -278,9 +287,8 @@ jlong ZCollectedHeap::millis_since_last_gc() {
 void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   tc->do_thread(_director);
   tc->do_thread(_driver);
-  tc->do_thread(_uncommitter);
   tc->do_thread(_stat);
-  _heap.worker_threads_do(tc);
+  _heap.threads_do(tc);
   _runtime_workers.threads_do(tc);
 }
 
@@ -27,13 +27,13 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/softRefPolicy.hpp"
 #include "gc/z/zBarrierSet.hpp"
-#include "gc/z/zDirector.hpp"
-#include "gc/z/zDriver.hpp"
 #include "gc/z/zHeap.hpp"
 #include "gc/z/zInitialize.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
-#include "gc/z/zStat.hpp"
-#include "gc/z/zUncommitter.hpp"
 
+class ZDirector;
+class ZDriver;
+class ZStat;
+
 class ZCollectedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -45,7 +45,6 @@ private:
   ZHeap _heap;
   ZDirector* _director;
   ZDriver* _driver;
-  ZUncommitter* _uncommitter;
   ZStat* _stat;
   ZRuntimeWorkers _runtime_workers;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@ public:
   ZFuture();
 
   void set(T value);
-  T peek();
   T get();
 };
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,11 +41,6 @@ inline void ZFuture<T>::set(T value) {
   _sema.signal();
 }
 
-template <typename T>
-inline T ZFuture<T>::peek() {
-  return _value;
-}
-
 template <typename T>
 inline T ZFuture<T>::get() {
   // Wait for notification
@@ -27,6 +27,7 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zHeuristics.hpp"
 #include "gc/z/zMark.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageTable.inline.hpp"
@@ -57,7 +58,7 @@ ZHeap* ZHeap::_heap = NULL;
 ZHeap::ZHeap() :
     _workers(),
     _object_allocator(),
-    _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
+    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
     _page_table(),
     _forwarding_table(),
     _mark(&_workers, &_page_table),
@@ -66,32 +67,13 @@ ZHeap::ZHeap() :
     _relocate(&_workers),
     _relocation_set(),
     _unload(&_workers),
-    _serviceability(heap_min_size(), heap_max_size()) {
+    _serviceability(min_capacity(), max_capacity()) {
   // Install global heap instance
   assert(_heap == NULL, "Already initialized");
   _heap = this;
 
   // Update statistics
-  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
-}
-
-size_t ZHeap::heap_min_size() const {
-  return MinHeapSize;
-}
-
-size_t ZHeap::heap_initial_size() const {
-  return InitialHeapSize;
-}
-
-size_t ZHeap::heap_max_size() const {
-  return MaxHeapSize;
-}
-
-size_t ZHeap::heap_max_reserve_size() const {
-  // Reserve one small page per worker plus one shared medium page. This is still just
-  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
-  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
-  return MIN2(max_reserve_size, heap_max_size());
+  ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
 }
 
 bool ZHeap::is_initialized() const {
@@ -198,7 +180,8 @@ void ZHeap::set_boost_worker_threads(bool boost) {
   _workers.set_boost(boost);
 }
 
-void ZHeap::worker_threads_do(ThreadClosure* tc) const {
+void ZHeap::threads_do(ThreadClosure* tc) const {
+  _page_allocator.threads_do(tc);
   _workers.threads_do(tc);
 }
 
@@ -237,10 +220,6 @@ void ZHeap::free_page(ZPage* page, bool reclaimed) {
   _page_allocator.free_page(page, reclaimed);
 }
 
-uint64_t ZHeap::uncommit(uint64_t delay) {
-  return _page_allocator.uncommit(delay);
-}
-
 void ZHeap::flip_to_marked() {
   ZVerifyViewsFlip flip(&_page_allocator);
   ZAddress::flip_to_marked();
@@ -60,11 +60,6 @@ private:
   ZUnload _unload;
   ZServiceability _serviceability;
 
-  size_t heap_min_size() const;
-  size_t heap_initial_size() const;
-  size_t heap_max_size() const;
-  size_t heap_max_reserve_size() const;
-
   void flip_to_marked();
   void flip_to_remapped();
 
@@ -99,11 +94,11 @@ public:
   bool is_in(uintptr_t addr) const;
   uint32_t hash_oop(uintptr_t addr) const;
 
-  // Workers
+  // Threads
   uint nconcurrent_worker_threads() const;
   uint nconcurrent_no_boost_worker_threads() const;
   void set_boost_worker_threads(bool boost);
-  void worker_threads_do(ThreadClosure* tc) const;
+  void threads_do(ThreadClosure* tc) const;
 
   // Reference processing
   ReferenceDiscoverer* reference_discoverer();
@@ -117,9 +112,6 @@ public:
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);
 
-  // Uncommit memory
-  uint64_t uncommit(uint64_t delay);
-
   // Object allocation
   uintptr_t alloc_tlab(size_t size);
   uintptr_t alloc_object(size_t size);
@@ -49,13 +49,18 @@ void ZHeuristics::set_medium_page_size() {
     ZObjectSizeLimitMedium      = ZPageSizeMedium / 8;
     ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13;
     ZObjectAlignmentMedium      = 1 << ZObjectAlignmentMediumShift;
+
+    log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
+  } else {
+    log_info_p(gc, init)("Medium Page Size: N/A");
   }
 }
 
+size_t ZHeuristics::max_reserve() {
+  // Reserve one small page per worker plus one shared medium page. This is
+  // still just an estimate and doesn't guarantee that we can't run out of
+  // memory during relocation.
+  const uint nworkers = MAX2(ParallelGCThreads, ConcGCThreads);
+  const size_t reserve = (nworkers * ZPageSizeSmall) + ZPageSizeMedium;
+  return MIN2(MaxHeapSize, reserve);
+}
+
 bool ZHeuristics::use_per_cpu_shared_small_pages() {
   // Use per-CPU shared small pages only if these pages occupy at most 3.125%
   // of the max heap size. Otherwise fall back to using a single shared small
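A worked example of the reserve formula above (illustrative values: 2M small pages, a 32M medium page, 10 workers, 16G max heap):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t small_page  = 2 * M;   // ZPageSizeSmall (granule-sized)
      const size_t medium_page = 32 * M;  // illustrative ZPageSizeMedium
      const size_t nworkers    = 10;      // MAX2(ParallelGCThreads, ConcGCThreads)
      const size_t max_heap    = 16384 * M;

      const size_t reserve = nworkers * small_page + medium_page;
      printf("max reserve: %zuM\n", std::min(max_heap, reserve) / M); // 52M
      return 0;
    }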
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@ class ZHeuristics : public AllStatic {
 public:
   static void set_medium_page_size();
 
+  static size_t max_reserve();
+
   static bool use_per_cpu_shared_small_pages();
 
   static uint nparallel_workers();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
   ZThreadLocalAllocBuffer::initialize();
   ZTracer::initialize();
   ZLargePages::initialize();
-  ZHeuristics::set_medium_page_size();
   ZBarrierSet::set_barrier_set(barrier_set);
 
   initialize_os();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zMemory.inline.hpp"
 #include "memory/allocation.inline.hpp"
 
@@ -86,6 +87,8 @@ void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
 }
 
 uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
@@ -109,6 +112,8 @@ uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
 }
 
 uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZMemory* area = _freelist.first();
   if (area != NULL) {
     if (area->size() <= size) {
@@ -133,6 +138,8 @@ uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocate
 }
 
 uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZListReverseIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
@@ -155,6 +162,8 @@ uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
 }
 
 uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
+  ZLocker<ZLock> locker(&_lock);
+
   ZMemory* area = _freelist.last();
   if (area != NULL) {
     if (area->size() <= size) {
@@ -181,6 +190,8 @@ void ZMemoryManager::free(uintptr_t start, size_t size) {
   assert(start != UINTPTR_MAX, "Invalid address");
   const uintptr_t end = start + size;
 
+  ZLocker<ZLock> locker(&_lock);
+
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (start < area->start()) {
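The pattern here is to push synchronization into ZMemoryManager itself, so callers no longer hold a broader allocator lock around free-list operations. A minimal sketch of the same RAII scoped-locking idea (std::mutex stands in for ZLock/ZLocker; the class and its helpers are hypothetical, not the HotSpot API):

    #include <cstdint>
    #include <list>
    #include <mutex>

    // A free-list manager that locks internally on every operation.
    class FreeList {
    public:
      uintptr_t alloc_from_front(size_t size) {
        std::lock_guard<std::mutex> locker(_lock); // scoped, like ZLocker<ZLock>
        for (auto it = _areas.begin(); it != _areas.end(); ++it) {
          if (it->size >= size) {
            const uintptr_t start = it->start;
            it->start += size;
            it->size -= size;
            if (it->size == 0) _areas.erase(it);
            return start;
          }
        }
        return UINTPTR_MAX; // out of memory
      }

      void free(uintptr_t start, size_t size) {
        std::lock_guard<std::mutex> locker(_lock);
        _areas.push_back({start, size}); // real code inserts sorted and merges
      }

    private:
      struct Area { uintptr_t start; size_t size; };
      std::mutex _lock;
      std::list<Area> _areas;
    };

    int main() {
      FreeList fl;
      fl.free(0x100000, 0x200000);
      return fl.alloc_from_front(0x1000) == 0x100000 ? 0 : 1;
    }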
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZMEMORY_HPP
 
 #include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
 #include "memory/allocation.hpp"
 
 class ZMemory : public CHeapObj<mtGC> {
@@ -65,6 +66,7 @@ public:
   };
 
 private:
+  ZLock          _lock;
   ZList<ZMemory> _freelist;
   Callbacks      _callbacks;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,7 @@ ZPage::~ZPage() {}
 void ZPage::assert_initialized() const {
   assert(!_virtual.is_null(), "Should not be null");
   assert(!_physical.is_null(), "Should not be null");
+  assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch");
   assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
          (_type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
          (_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
@@ -99,6 +100,27 @@ ZPage* ZPage::split(uint8_t type, size_t size) {
   return page;
 }
 
+ZPage* ZPage::split_committed() {
+  // Split any committed part of this page into a separate page,
+  // leaving this page with only uncommitted physical memory.
+  const ZPhysicalMemory pmem = _physical.split_committed();
+  if (pmem.is_null()) {
+    // Nothing committed
+    return NULL;
+  }
+
+  assert(!_physical.is_null(), "Should not be null");
+
+  // Resize this page
+  const ZVirtualMemory vmem = _virtual.split(pmem.size());
+  _type = type_from_size(_virtual.size());
+  _top = start();
+  _livemap.resize(object_max_count());
+
+  // Create new page
+  return new ZPage(vmem, pmem);
+}
+
 void ZPage::print_on(outputStream* out) const {
   out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s",
                 type_to_string(), start(), top(), end(),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,17 +69,15 @@ public:
   uintptr_t top() const;
   size_t remaining() const;
 
-  const ZPhysicalMemory& physical_memory() const;
   const ZVirtualMemory& virtual_memory() const;
+  const ZPhysicalMemory& physical_memory() const;
+  ZPhysicalMemory& physical_memory();
 
   uint8_t numa_id();
 
   bool is_allocating() const;
   bool is_relocatable() const;
 
-  bool is_mapped() const;
-  void set_pre_mapped();
-
   uint64_t last_used() const;
   void set_last_used();
 
@@ -88,6 +86,7 @@ public:
   ZPage* retype(uint8_t type);
   ZPage* split(size_t size);
   ZPage* split(uint8_t type, size_t size);
+  ZPage* split_committed();
 
   bool is_in(uintptr_t addr) const;
 
@@ -126,12 +126,16 @@ inline size_t ZPage::remaining() const {
   return end() - top();
 }
 
+inline const ZVirtualMemory& ZPage::virtual_memory() const {
+  return _virtual;
+}
+
 inline const ZPhysicalMemory& ZPage::physical_memory() const {
   return _physical;
 }
 
-inline const ZVirtualMemory& ZPage::virtual_memory() const {
-  return _virtual;
+inline ZPhysicalMemory& ZPage::physical_memory() {
+  return _physical;
 }
 
 inline uint8_t ZPage::numa_id() {
@@ -150,17 +154,6 @@ inline bool ZPage::is_relocatable() const {
   return _seqnum < ZGlobalSeqNum;
 }
 
-inline bool ZPage::is_mapped() const {
-  return _seqnum > 0;
-}
-
-inline void ZPage::set_pre_mapped() {
-  // The _seqnum variable is also used to signal that the virtual and physical
-  // memory has been mapped. So, we need to set it to non-zero when the memory
-  // has been pre-mapped.
-  _seqnum = 1;
-}
-
 inline uint64_t ZPage::last_used() const {
   return _last_used;
 }
 
(File diff suppressed because it is too large.)
@@ -31,60 +31,70 @@
 #include "gc/z/zPhysicalMemory.hpp"
 #include "gc/z/zSafeDelete.hpp"
 #include "gc/z/zVirtualMemory.hpp"
 #include "memory/allocation.hpp"
 
-class ZPageAllocRequest;
+class ThreadClosure;
+class ZPageAllocation;
 class ZWorkers;
+class ZUncommitter;
 
 class ZPageAllocator {
   friend class VMStructs;
+  friend class ZUncommitter;
 
 private:
   ZLock                      _lock;
+  ZPageCache                 _cache;
   ZVirtualMemoryManager      _virtual;
   ZPhysicalMemoryManager     _physical;
-  ZPageCache                 _cache;
   const size_t               _min_capacity;
   const size_t               _max_capacity;
   const size_t               _max_reserve;
-  size_t                     _current_max_capacity;
-  size_t                     _capacity;
+  volatile size_t            _current_max_capacity;
+  volatile size_t            _capacity;
+  volatile size_t            _claimed;
+  volatile size_t            _used;
   size_t                     _used_high;
   size_t                     _used_low;
-  size_t                     _used;
   size_t                     _allocated;
   ssize_t                    _reclaimed;
-  ZList<ZPageAllocRequest>   _queue;
-  ZList<ZPageAllocRequest>   _satisfied;
+  ZList<ZPageAllocation>     _stalled;
+  ZList<ZPageAllocation>     _satisfied;
+  ZUncommitter*              _uncommitter;
   mutable ZSafeDelete<ZPage> _safe_delete;
-  bool                       _uncommit;
   bool                       _initialized;
 
-  static ZPage* const gc_marker;
+  bool prime_cache(ZWorkers* workers, size_t size);
 
-  void prime_cache(ZWorkers* workers, size_t size);
+  size_t increase_capacity(size_t size);
+  void decrease_capacity(size_t size, bool set_max_capacity);
 
   void increase_used(size_t size, bool relocation);
   void decrease_used(size_t size, bool reclaimed);
 
   ZPage* create_page(uint8_t type, size_t size);
+  bool commit_page(ZPage* page);
+  void uncommit_page(ZPage* page);
+
+  void map_page(const ZPage* page) const;
+  void unmap_page(const ZPage* page) const;
+
   void destroy_page(ZPage* page);
 
   size_t max_available(bool no_reserve) const;
-  bool ensure_available(size_t size, bool no_reserve);
-  void ensure_uncached_available(size_t size);
+  bool is_alloc_allowed(size_t size, bool no_reserve) const;
+  bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const;
 
-  void check_out_of_memory_during_initialization();
+  bool alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages);
+  bool alloc_page_common(ZPageAllocation* allocation);
+  bool alloc_page_stall(ZPageAllocation* allocation);
+  bool alloc_page_or_stall(ZPageAllocation* allocation);
+  ZPage* alloc_page_create(ZPageAllocation* allocation);
+  ZPage* alloc_page_finalize(ZPageAllocation* allocation);
+  void alloc_page_failed(ZPageAllocation* allocation);
 
-  ZPage* alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve);
-  ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags);
-  ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags);
-  ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags);
+  void satisfy_stalled();
 
-  size_t flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation);
-  void flush_cache_for_allocation(size_t requested);
+  void free_page_inner(ZPage* page, bool reclaimed);
 
-  void satisfy_alloc_queue();
+  size_t uncommit(uint64_t* timeout);
 
 public:
   ZPageAllocator(ZWorkers* workers,
@@ -112,13 +122,9 @@ public:
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void free_page(ZPage* page, bool reclaimed);
 
-  uint64_t uncommit(uint64_t delay);
-
   void enable_deferred_delete() const;
   void disable_deferred_delete() const;
 
-  void map_page(const ZPage* page) const;
-
   void debug_map_page(const ZPage* page) const;
   void debug_unmap_page(const ZPage* page) const;
 
@@ -126,6 +132,8 @@ public:
   void check_out_of_memory();
 
   void pages_do(ZPageClosure* cl) const;
+
+  void threads_do(ThreadClosure* tc) const;
 };
 
 #endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
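_current_max_capacity, _capacity, the new _claimed, and _used become volatile because they are now read outside the ZPageAllocator lock — the whole point of the change is that commit/uncommit work happens without holding it. A minimal sketch of the locked-write/lock-free-read pattern, using std::atomic as a stand-in for HotSpot's volatile-plus-Atomic conventions (hypothetical class, not the JDK code):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    class Allocator {
    public:
      size_t capacity() const {
        // Lock-free read; may race benignly with a concurrent update
        return _capacity.load(std::memory_order_relaxed);
      }

      void increase_capacity(size_t size) {
        std::lock_guard<std::mutex> locker(_lock); // writes stay serialized
        _capacity.fetch_add(size, std::memory_order_relaxed);
      }

    private:
      std::mutex _lock;
      std::atomic<size_t> _capacity{0};
    };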
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
 #include "gc/z/zList.inline.hpp"
 #include "gc/z/zNUMA.hpp"
 #include "gc/z/zPage.inline.hpp"
@@ -29,25 +30,36 @@
 #include "gc/z/zStat.hpp"
 #include "gc/z/zValue.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
 
 static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
 static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
 
+class ZPageCacheFlushClosure : public StackObj {
+  friend class ZPageCache;
+
+protected:
+  const size_t _requested;
+  size_t       _flushed;
+
+public:
+  ZPageCacheFlushClosure(size_t requested);
+  virtual bool do_page(const ZPage* page) = 0;
+};
+
 ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
     _requested(requested),
     _flushed(0) {}
 
-size_t ZPageCacheFlushClosure::overflushed() const {
-  return _flushed > _requested ? _flushed - _requested : 0;
-}
-
 ZPageCache::ZPageCache() :
-    _available(0),
     _small(),
     _medium(),
-    _large() {}
+    _large(),
+    _last_commit(0) {}
 
 ZPage* ZPageCache::alloc_small_page() {
   const uint32_t numa_id = ZNUMA::id();
@@ -161,7 +173,7 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
       page = oversized->split(type, size);
 
       // Cache remainder
-      free_page_inner(oversized);
+      free_page(oversized);
     } else {
       // Re-type correctly sized page
      page = oversized->retype(type);
@@ -169,16 +181,14 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
     }
   }
 
-  if (page != NULL) {
-    _available -= page->size();
-  } else {
+  if (page == NULL) {
     ZStatInc(ZCounterPageCacheMiss);
   }
 
   return page;
 }
 
-void ZPageCache::free_page_inner(ZPage* page) {
+void ZPageCache::free_page(ZPage* page) {
   const uint8_t type = page->type();
   if (type == ZPageTypeSmall) {
     _small.get(page->numa_id()).insert_first(page);
@@ -189,11 +199,6 @@ void ZPageCache::free_page_inner(ZPage* page) {
   }
 }
 
-void ZPageCache::free_page(ZPage* page) {
-  free_page_inner(page);
-  _available += page->size();
-}
-
 bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
   ZPage* const page = from->last();
   if (page == NULL || !cl->do_page(page)) {
@@ -202,7 +207,6 @@ bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from
   }
 
   // Flush page
-  _available -= page->size();
   from->remove(page);
   to->insert_last(page);
   return true;
@@ -239,6 +243,94 @@ void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
   flush_list(cl, &_large, to);
   flush_list(cl, &_medium, to);
   flush_per_numa_lists(cl, &_small, to);
+
+  if (cl->_flushed > cl->_requested) {
+    // Overflushed, re-insert part of last page into the cache
+    const size_t overflushed = cl->_flushed - cl->_requested;
+    ZPage* const reinsert = to->last()->split(overflushed);
+    free_page(reinsert);
+    cl->_flushed -= overflushed;
+  }
 }
 
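A worked example of the overflush correction above (page sizes are illustrative): requesting 6M may end up flushing a 2M page plus an 8M page; the 4M excess is split off the last page and re-inserted into the cache:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t requested = 6 * M;
      size_t flushed = 2 * M + 8 * M; // closure flushed a 2M and an 8M page

      if (flushed > requested) {
        const size_t overflushed = flushed - requested; // 4M
        // The real code splits the last flushed page and re-inserts the
        // 4M remainder into the cache via free_page(reinsert).
        flushed -= overflushed;
      }

      assert(flushed == requested); // exactly 6M leaves the cache
      return 0;
    }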
+class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
+public:
+  ZPageCacheFlushForAllocationClosure(size_t requested) :
+      ZPageCacheFlushClosure(requested) {}
+
+  virtual bool do_page(const ZPage* page) {
+    if (_flushed < _requested) {
+      // Flush page
+      _flushed += page->size();
+      return true;
+    }
+
+    // Don't flush page
+    return false;
+  }
+};
+
+void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
+  ZPageCacheFlushForAllocationClosure cl(requested);
+  flush(&cl, to);
+}
+
+class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
+private:
+  const uint64_t _now;
+  uint64_t*      _timeout;
+
+public:
+  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) :
+      ZPageCacheFlushClosure(requested),
+      _now(now),
+      _timeout(timeout) {
+    // Set initial timeout
+    *_timeout = ZUncommitDelay;
+  }
+
+  virtual bool do_page(const ZPage* page) {
+    const uint64_t expires = page->last_used() + ZUncommitDelay;
+    if (expires > _now) {
+      // Don't flush page, record shortest non-expired timeout
+      *_timeout = MIN2(*_timeout, expires - _now);
+      return false;
+    }
+
+    if (_flushed >= _requested) {
+      // Don't flush page, requested amount flushed
+      return false;
+    }
+
+    // Flush page
+    _flushed += page->size();
+    return true;
+  }
+};
+
+size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
+  const uint64_t now = os::elapsedTime();
+  const uint64_t expires = _last_commit + ZUncommitDelay;
+  if (expires > now) {
+    // Delay uncommit, set next timeout
+    *timeout = expires - now;
+    return 0;
+  }
+
+  if (requested == 0) {
+    // Nothing to flush, set next timeout
+    *timeout = ZUncommitDelay;
+    return 0;
+  }
+
+  ZPageCacheFlushForUncommitClosure cl(requested, now, timeout);
+  flush(&cl, to);
+
+  return cl._flushed;
+}
+
+void ZPageCache::set_last_commit() {
+  _last_commit = os::elapsedTime();
+}
+
 void ZPageCache::pages_do(ZPageClosure* cl) const {
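To make the timeout logic concrete (300 seconds is the default ZUncommitDelay; the timestamps are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t delay = 300;       // ZUncommitDelay (seconds)
      const uint64_t now = 1000;        // os::elapsedTime()
      const uint64_t last_commit = 900; // memory was committed 100s ago

      uint64_t timeout;
      const uint64_t expires = last_commit + delay; // 1200
      if (expires > now) {
        // Too soon after the last commit: don't uncommit yet, retry later.
        timeout = expires - now; // 200s
      } else {
        // Otherwise each cached page is checked the same way against its own
        // last_used() timestamp, keeping the shortest non-expired timeout.
        timeout = delay;
      }

      printf("next uncommit attempt in %llus\n", (unsigned long long)timeout); // 200
      return 0;
    }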
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,25 +27,15 @@
 #include "gc/z/zList.hpp"
 #include "gc/z/zPage.hpp"
 #include "gc/z/zValue.hpp"
-#include "memory/allocation.hpp"
 
-class ZPageCacheFlushClosure : public StackObj {
-protected:
-  const size_t _requested;
-  size_t       _flushed;
-
-public:
-  ZPageCacheFlushClosure(size_t requested);
-  size_t overflushed() const;
-  virtual bool do_page(const ZPage* page) = 0;
-};
+class ZPageCacheFlushClosure;
 
 class ZPageCache {
 private:
-  size_t                  _available;
   ZPerNUMA<ZList<ZPage> > _small;
   ZList<ZPage>            _medium;
   ZList<ZPage>            _large;
+  uint64_t                _last_commit;
 
   ZPage* alloc_small_page();
   ZPage* alloc_medium_page();
@@ -55,21 +45,21 @@ private:
   ZPage* alloc_oversized_large_page(size_t size);
   ZPage* alloc_oversized_page(size_t size);
 
-  void free_page_inner(ZPage* page);
-
   bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
   void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
   void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to);
+  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
 
 public:
   ZPageCache();
 
-  size_t available() const;
-
   ZPage* alloc_page(uint8_t type, size_t size);
   void free_page(ZPage* page);
 
-  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
+  void flush_for_allocation(size_t requested, ZList<ZPage>* to);
+  size_t flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout);
+
+  void set_last_commit();
 
   void pages_do(ZPageClosure* cl) const;
 };
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
-#define SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
-
-#include "gc/z/zList.inline.hpp"
-#include "gc/z/zPageCache.hpp"
-#include "gc/z/zValue.inline.hpp"
-
-inline size_t ZPageCache::available() const {
-  return _available;
-}
-
-#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
@ -27,92 +27,212 @@
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zPhysicalMemory.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory() :
|
||||
_nsegments_max(0),
|
||||
_nsegments(0),
|
||||
_segments(NULL) {}
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
|
||||
_nsegments_max(0),
|
||||
_nsegments(0),
|
||||
_segments(NULL) {
|
||||
add_segment(segment);
|
||||
}
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
|
||||
_nsegments_max(0),
|
||||
_nsegments(0),
|
||||
_segments(NULL) {
|
||||
|
||||
// Copy segments
|
||||
for (size_t i = 0; i < pmem.nsegments(); i++) {
|
||||
add_segment(pmem.segment(i));
|
||||
}
|
||||
add_segments(pmem);
|
||||
}
|
||||
|
||||
const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
|
||||
// Free segments
|
||||
delete [] _segments;
|
||||
_segments = NULL;
|
||||
_nsegments = 0;
|
||||
|
||||
// Copy segments
|
||||
for (size_t i = 0; i < pmem.nsegments(); i++) {
|
||||
add_segment(pmem.segment(i));
|
||||
}
|
||||
|
||||
remove_segments();
|
||||
add_segments(pmem);
|
||||
return *this;
|
||||
}
|
||||
|
||||
ZPhysicalMemory::~ZPhysicalMemory() {
|
||||
delete [] _segments;
|
||||
_segments = NULL;
|
  _nsegments = 0;
  remove_segments();
}

size_t ZPhysicalMemory::size() const {
  size_t size = 0;

  for (size_t i = 0; i < _nsegments; i++) {
  for (uint32_t i = 0; i < _nsegments; i++) {
    size += _segments[i].size();
  }

  return size;
}

void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
  assert(index <= _nsegments, "Invalid index");

  ZPhysicalMemorySegment* const from_segments = _segments;

  if (_nsegments + 1 > _nsegments_max) {
    // Resize array
    _nsegments_max = round_up_power_of_2(_nsegments_max + 1);
    _segments = new ZPhysicalMemorySegment[_nsegments_max];

    // Copy segments before index
    for (uint32_t i = 0; i < index; i++) {
      _segments[i] = from_segments[i];
    }
  }

  // Copy/Move segments after index
  for (uint32_t i = _nsegments; i > index; i--) {
    _segments[i] = from_segments[i - 1];
  }

  // Insert new segment
  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
  _nsegments++;

  // Delete old array
  if (from_segments != _segments) {
    delete [] from_segments;
  }
}

void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
  assert(index < _nsegments, "Invalid index");
  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
}

void ZPhysicalMemory::remove_segment(uint32_t index) {
  assert(index < _nsegments, "Invalid index");

  // Move segments after index
  for (uint32_t i = index + 1; i < _nsegments; i++) {
    _segments[i - 1] = _segments[i];
  }

  _nsegments--;
}

void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    add_segment(pmem.segment(i));
  }
}

void ZPhysicalMemory::remove_segments() {
  delete [] _segments;
  _segments = NULL;
  _nsegments_max = 0;
  _nsegments = 0;
}

static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
  return before.end() == after.start() && before.is_committed() == after.is_committed();
}

void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
  // Try merge with last segment
  if (_nsegments > 0) {
    ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
    assert(last.end() <= segment.start(), "Segments added out of order");
    if (last.end() == segment.start()) {
      last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
  // Insert segments in address order, merge segments when possible
  for (uint32_t i = _nsegments; i > 0; i--) {
    const uint32_t current = i - 1;

    if (_segments[current].end() <= segment.start()) {
      if (is_mergable(_segments[current], segment)) {
        if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
          // Merge with end of current segment and start of next segment
          const size_t start = _segments[current].start();
          const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
          replace_segment(current, start, size, segment.is_committed());
          remove_segment(current + 1);
          return;
        }

        // Merge with end of current segment
        const size_t start = _segments[current].start();
        const size_t size = _segments[current].size() + segment.size();
        replace_segment(current, start, size, segment.is_committed());
        return;
      } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
        // Merge with start of next segment
        const size_t start = segment.start();
        const size_t size = segment.size() + _segments[current + 1].size();
        replace_segment(current + 1, start, size, segment.is_committed());
        return;
      }

      // Insert after current segment
      insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
      return;
    }
  }

  // Resize array
  ZPhysicalMemorySegment* const old_segments = _segments;
  _segments = new ZPhysicalMemorySegment[_nsegments + 1];
  for (size_t i = 0; i < _nsegments; i++) {
    _segments[i] = old_segments[i];
  if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
    // Merge with start of first segment
    const size_t start = segment.start();
    const size_t size = segment.size() + _segments[0].size();
    replace_segment(0, start, size, segment.is_committed());
    return;
  }
  delete [] old_segments;

  // Add new segment
  _segments[_nsegments] = segment;
  _nsegments++;
  // Insert before first segment
  insert_segment(0, segment.start(), segment.size(), segment.is_committed());
}
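
A minimal usage sketch (illustrative only, not part of this change; the values mirror the unit tests added further down): adjacent segments with the same commit state are merged, while a differing commit state keeps segments separate.

ZPhysicalMemory pmem;
pmem.add_segment(ZPhysicalMemorySegment(0, 10, true /* committed */));
pmem.add_segment(ZPhysicalMemorySegment(10, 10, true /* committed */));   // merged with previous into (0, 20)
pmem.add_segment(ZPhysicalMemorySegment(20, 10, false /* committed */));  // different state, kept separate
// pmem.nsegments() == 2, pmem.size() == 30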

bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(!_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely committed
    _segments[index].set_committed(true);
    return true;
  }

  if (size > 0) {
    // Partially committed, split segment
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
    replace_segment(index, _segments[index].start(), size, true /* committed */);
  }

  return false;
}

bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely uncommitted
    _segments[index].set_committed(false);
    return true;
  }

  if (size > 0) {
    // Partially uncommitted, split segment
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
    replace_segment(index, _segments[index].start(), size, false /* committed */);
  }

  return false;
}
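
A short sketch of the partial case (illustrative, with assumed sizes): committing only part of an uncommitted segment splits it in two, and the false return value reports the partial result to the caller.

ZPhysicalMemory pmem;
pmem.add_segment(ZPhysicalMemorySegment(0, 20, false /* committed */));
const bool fully = pmem.commit_segment(0, 10);  // only 10 of the 20 bytes were committed
// fully == false; segment 0 is now (0, 10) committed, segment 1 is (10, 10) uncommitted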

ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  size_t nsegments = 0;
  uint32_t nsegments = 0;

  for (size_t i = 0; i < _nsegments; i++) {
  for (uint32_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
@ -121,8 +241,8 @@ ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
      }
    } else {
      // Keep segment
@ -135,25 +255,68 @@ ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  return pmem;
}

ZPhysicalMemory ZPhysicalMemory::split_committed() {
  ZPhysicalMemory pmem;
  uint32_t nsegments = 0;

  for (uint32_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (segment.is_committed()) {
      // Transfer segment
      pmem.add_segment(segment);
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

  _nsegments = nsegments;

  return pmem;
}
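
A sketch of split_committed() (mirroring the split_committed unit test added below): committed segments are transferred to the returned object, while uncommitted segments stay behind.

ZPhysicalMemory pmem0;
pmem0.add_segment(ZPhysicalMemorySegment(0, 10, true));
pmem0.add_segment(ZPhysicalMemorySegment(10, 10, false));
pmem0.add_segment(ZPhysicalMemorySegment(20, 10, true));
pmem0.add_segment(ZPhysicalMemorySegment(30, 10, false));

ZPhysicalMemory pmem1 = pmem0.split_committed();
// pmem1 now holds the two committed segments (20 bytes),
// pmem0 keeps the two uncommitted ones (20 bytes)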

ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Register everything as uncommitted
  _uncommitted.free(0, max_capacity);
  // Make the whole range free
  _manager.free(0, max_capacity);
}

bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}

void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
  _backing.warn_commit_limits(max);
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
  _backing.warn_commit_limits(max_capacity);
}

bool ZPhysicalMemoryManager::supports_uncommit() {
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
  assert(!is_init_completed(), "Invalid state");

  // Test if uncommit is supported by uncommitting and then re-committing a granule
  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then uncommit
  // will be enabled.
  if (!ZUncommit) {
    log_info(gc, init)("Uncommit: Disabled");
    return;
  }

  if (max_capacity == min_capacity) {
    log_info(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  // Test if uncommit is supported by the operating system by committing
  // and then uncommitting a granule.
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
  if (!commit(pmem) || !uncommit(pmem)) {
    log_info(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  log_info(gc, init)("Uncommit: Enabled");
  log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}

void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
@ -172,99 +335,79 @@ void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t
  }
}

size_t ZPhysicalMemoryManager::commit(size_t size) {
  size_t committed = 0;

  // Fill holes in the backing memory
  while (committed < size) {
    size_t allocated = 0;
    const size_t remaining = size - committed;
    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
    if (start == UINTPTR_MAX) {
      // No holes to commit
      break;
    }

    // Try commit hole
    const size_t filled = _backing.commit(start, allocated);
    if (filled > 0) {
      // Successful or partially successful
      _committed.free(start, filled);
      committed += filled;
    }
    if (filled < allocated) {
      // Failed or partially failed
      _uncommitted.free(start + filled, allocated - filled);
      return committed;
    }
  }

  return committed;
}

size_t ZPhysicalMemoryManager::uncommit(size_t size) {
  size_t uncommitted = 0;

  // Punch holes in backing memory
  while (uncommitted < size) {
    size_t allocated = 0;
    const size_t remaining = size - uncommitted;
    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
    assert(start != UINTPTR_MAX, "Allocation should never fail");

    // Try punch hole
    const size_t punched = _backing.uncommit(start, allocated);
    if (punched > 0) {
      // Successful or partially successful
      _uncommitted.free(start, punched);
      uncommitted += punched;
    }
    if (punched < allocated) {
      // Failed or partially failed
      _committed.free(start + punched, allocated - punched);
      return uncommitted;
    }
  }

  return uncommitted;
}

ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
  assert(is_aligned(size, ZGranuleSize), "Invalid size");

  ZPhysicalMemory pmem;

  // Allocate segments
  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
  while (size > 0) {
    size_t allocated = 0;
    const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
    assert(start != UINTPTR_MAX, "Allocation should never fail");
    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
    pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
    size -= allocated;
  }

  return pmem;
}

void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
  const size_t nsegments = pmem.nsegments();

  // Free segments
  for (size_t i = 0; i < nsegments; i++) {
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _committed.free(segment.start(), segment.size());
    _manager.free(segment.start(), segment.size());
  }
}

bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
  // Commit segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (segment.is_committed()) {
      // Segment already committed
      continue;
    }

    // Commit segment
    const size_t committed = _backing.commit(segment.start(), segment.size());
    if (!pmem.commit_segment(i, committed)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}

bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
  // Uncommit segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!segment.is_committed()) {
      // Segment already uncommitted
      continue;
    }

    // Uncommit segment
    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
    if (!pmem.uncommit_segment(i, uncommitted)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
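
This commit/uncommit pair is what lets the ZPageAllocator stop holding its lock across slow backing-store operations: address ranges are claimed under the lock with alloc(), while commit() runs outside it. A hedged sketch of the intended call sequence (the _physical member name and the cleanup path are assumptions for illustration, not taken from this hunk):

ZPhysicalMemory pmem;
_physical.alloc(pmem, size);               // fast, performed while holding the allocator lock
// ... allocator lock released here ...
if (!_physical.commit(pmem)) {             // slow, performed without the lock
  ZPhysicalMemory committed = pmem.split_committed();  // keep what succeeded
  _physical.free(pmem);                    // hand back the uncommitted remainder
  // ... the committed part can still be used or returned separately ...
}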

void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}

void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  const size_t nsegments = pmem.nsegments();
  size_t size = 0;

  // Map segments
  for (size_t i = 0; i < nsegments; i++) {
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _backing.map(addr + size, segment.size(), segment.start());
    size += segment.size();

@ -32,21 +32,30 @@ class ZPhysicalMemorySegment : public CHeapObj<mtGC> {
private:
  uintptr_t _start;
  uintptr_t _end;
  bool _committed;

public:
  ZPhysicalMemorySegment();
  ZPhysicalMemorySegment(uintptr_t start, size_t size);
  ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed);

  uintptr_t start() const;
  uintptr_t end() const;
  size_t size() const;

  bool is_committed() const;
  void set_committed(bool committed);
};

class ZPhysicalMemory {
private:
  size_t _nsegments;
  uint32_t _nsegments_max;
  uint32_t _nsegments;
  ZPhysicalMemorySegment* _segments;

  void insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
  void replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
  void remove_segment(uint32_t index);

public:
  ZPhysicalMemory();
  ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
@ -57,18 +66,24 @@ public:
  bool is_null() const;
  size_t size() const;

  size_t nsegments() const;
  const ZPhysicalMemorySegment& segment(size_t index) const;
  uint32_t nsegments() const;
  const ZPhysicalMemorySegment& segment(uint32_t index) const;

  void add_segments(const ZPhysicalMemory& pmem);
  void remove_segments();

  void add_segment(const ZPhysicalMemorySegment& segment);
  bool commit_segment(uint32_t index, size_t size);
  bool uncommit_segment(uint32_t index, size_t size);

  ZPhysicalMemory split(size_t size);
  ZPhysicalMemory split_committed();
};

class ZPhysicalMemoryManager {
private:
  ZPhysicalMemoryBacking _backing;
  ZMemoryManager _committed;
  ZMemoryManager _uncommitted;
  ZMemoryManager _manager;

  void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
  void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
@ -82,15 +97,15 @@ public:

  bool is_initialized() const;

  void warn_commit_limits(size_t max) const;
  bool supports_uncommit();
  void warn_commit_limits(size_t max_capacity) const;
  void try_enable_uncommit(size_t min_capacity, size_t max_capacity);

  size_t commit(size_t size);
  size_t uncommit(size_t size);

  ZPhysicalMemory alloc(size_t size);
  void alloc(ZPhysicalMemory& pmem, size_t size);
  void free(const ZPhysicalMemory& pmem);

  bool commit(ZPhysicalMemory& pmem);
  bool uncommit(ZPhysicalMemory& pmem);

  void pretouch(uintptr_t offset, size_t size) const;

  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,13 @@

inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() :
    _start(UINTPTR_MAX),
    _end(UINTPTR_MAX) {}
    _end(UINTPTR_MAX),
    _committed(false) {}

inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) :
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) :
    _start(start),
    _end(start + size) {}
    _end(start + size),
    _committed(committed) {}

inline uintptr_t ZPhysicalMemorySegment::start() const {
  return _start;
@ -47,15 +49,23 @@ inline size_t ZPhysicalMemorySegment::size() const {
  return _end - _start;
}

inline bool ZPhysicalMemorySegment::is_committed() const {
  return _committed;
}

inline void ZPhysicalMemorySegment::set_committed(bool committed) {
  _committed = committed;
}

inline bool ZPhysicalMemory::is_null() const {
  return _nsegments == 0;
}

inline size_t ZPhysicalMemory::nsegments() const {
inline uint32_t ZPhysicalMemory::nsegments() const {
  return _nsegments;
}

inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(size_t index) const {
inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(uint32_t index) const {
  assert(index < _nsegments, "Invalid segment index");
  return _segments[index];
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,52 +23,73 @@

#include "precompiled.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUncommitter.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"

ZUncommitter::ZUncommitter() :
    _monitor(Monitor::leaf, "ZUncommitter", false, Monitor::_safepoint_check_never),
static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);

ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator) :
    _page_allocator(page_allocator),
    _lock(),
    _stop(false) {
  set_name("ZUncommitter");
  create_and_start();
}

bool ZUncommitter::idle(uint64_t timeout) {
  // Idle for at least one second
  const uint64_t expires = os::elapsedTime() + MAX2<uint64_t>(timeout, 1);

  for (;;) {
    // We might wake up spuriously from wait, so always recalculate
    // the timeout after a wakeup to see if we need to wait again.
    const uint64_t now = os::elapsedTime();
    const uint64_t remaining = expires - MIN2(expires, now);

    MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
    if (remaining > 0 && !_stop) {
      ml.wait(remaining * MILLIUNITS);
    } else {
      return !_stop;
    }
bool ZUncommitter::wait(uint64_t timeout) const {
  ZLocker<ZConditionLock> locker(&_lock);
  while (!ZUncommit && !_stop) {
    _lock.wait();
  }

  if (!_stop && timeout > 0) {
    log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
    _lock.wait(timeout * MILLIUNITS);
  }

  return !_stop;
}

bool ZUncommitter::should_continue() const {
  ZLocker<ZConditionLock> locker(&_lock);
  return !_stop;
}

void ZUncommitter::run_service() {
  for (;;) {
    // Try uncommit unused memory
    const uint64_t timeout = ZHeap::heap()->uncommit(ZUncommitDelay);
  uint64_t timeout = 0;

    log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
  while (wait(timeout)) {
    EventZUncommit event;
    size_t uncommitted = 0;

    // Idle until next attempt
    if (!idle(timeout)) {
      return;
    while (should_continue()) {
      // Uncommit chunk
      const size_t flushed = _page_allocator->uncommit(&timeout);
      if (flushed == 0) {
        // Done
        break;
      }

      uncommitted += flushed;
    }

    if (uncommitted > 0) {
      // Update statistics
      ZStatInc(ZCounterUncommit, uncommitted);
      log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
                         uncommitted / M, percent_of(uncommitted, ZHeap::heap()->max_capacity()));

      // Send event
      event.commit(uncommitted);
    }
  }
}

void ZUncommitter::stop_service() {
  MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
  ZLocker<ZConditionLock> locker(&_lock);
  _stop = true;
  ml.notify();
  _lock.notify_all();
}
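
Summarizing the observable behavior of the new wait() (derived from the code above, stated as a comment sketch):

// wait(0)  with ZUncommit enabled:  returns immediately, the outer loop proceeds
// wait(10) with ZUncommit enabled:  blocks up to ~10s in _lock.wait(), or until stop_service() notifies
// wait(t)  with ZUncommit disabled: blocks in _lock.wait() until notified (e.g. by stop_service())
// The return value is always !_stop, so stop_service() cleanly terminates run_service().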

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -24,22 +24,26 @@
#ifndef SHARE_GC_Z_ZUNCOMMITTER_HPP
#define SHARE_GC_Z_ZUNCOMMITTER_HPP

#include "gc/z/zLock.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"

class ZPageAllocator;

class ZUncommitter : public ConcurrentGCThread {
private:
  Monitor _monitor;
  bool _stop;
  ZPageAllocator* const _page_allocator;
  mutable ZConditionLock _lock;
  bool _stop;

  bool idle(uint64_t timeout);
  bool wait(uint64_t timeout) const;
  bool should_continue() const;

protected:
  virtual void run_service();
  virtual void stop_service();

public:
  ZUncommitter();
  ZUncommitter(ZPageAllocator* page_allocator);
};

#endif // SHARE_GC_Z_ZUNCOMMITTER_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -151,14 +151,14 @@ bool ZVirtualMemoryManager::is_initialized() const {
  return _initialized;
}

ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front) {
ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
  uintptr_t start;

  if (alloc_from_front || size <= ZPageSizeSmall) {
    // Small page
  // Small pages are allocated at low addresses, while medium/large pages
  // are allocated at high addresses (unless forced to be at a low address).
  if (force_low_address || size <= ZPageSizeSmall) {
    start = _manager.alloc_from_front(size);
  } else {
    // Medium/Large page
    start = _manager.alloc_from_back(size);
  }

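A hedged usage sketch of the renamed parameter (the _virtual member name and the medium-page call site are assumptions for illustration): small allocations land at low addresses by size alone, while force_low_address lets a caller place larger allocations there too.

ZVirtualMemory small  = _virtual.alloc(ZPageSizeSmall,  false /* force_low_address */);  // low address by size
ZVirtualMemory medium = _virtual.alloc(ZPageSizeMedium, true  /* force_low_address */);  // forced to a low address
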
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ public:

  bool is_initialized() const;

  ZVirtualMemory alloc(size_t size, bool alloc_from_front = false);
  ZVirtualMemory alloc(size_t size, bool force_low_address);
  void free(const ZVirtualMemory& vmem);
};

@ -1002,18 +1002,13 @@
<Event name="ZPageAllocation" category="Java Virtual Machine, GC, Detailed" label="ZGC Page Allocation" description="Allocation of a ZPage" thread="true" stackTrace="true">
  <Field type="ZPageTypeType" name="type" label="Type" />
  <Field type="ulong" contentType="bytes" name="size" label="Size" />
  <Field type="ulong" contentType="bytes" name="usedAfter" label="Used After" />
  <Field type="ulong" contentType="bytes" name="freeAfter" label="Free After" />
  <Field type="ulong" contentType="bytes" name="inCacheAfter" label="In Cache After" />
  <Field type="ulong" contentType="bytes" name="flushed" label="Flushed" />
  <Field type="ulong" contentType="bytes" name="committed" label="Committed" />
  <Field type="uint" name="segments" label="Segments" />
  <Field type="boolean" name="nonBlocking" label="Non-blocking" />
  <Field type="boolean" name="noReserve" label="No Reserve" />
</Event>

<Event name="ZPageCacheFlush" category="Java Virtual Machine, GC, Detailed" label="ZGC Page Cache Flush" description="Flushing of ZPages" thread="true" stackTrace="true">
  <Field type="ulong" contentType="bytes" name="flushed" label="Flushed Size" />
  <Field type="boolean" name="forAllocation" label="For Allocation" />
</Event>

<Event name="ZRelocationSet" category="Java Virtual Machine, GC, Detailed" label="ZGC Relocation Set" thread="true">
  <Field type="ulong" contentType="bytes" name="total" label="Total" />
  <Field type="ulong" contentType="bytes" name="empty" label="Empty" />
@ -1047,8 +1042,6 @@
</Event>

<Event name="ZUncommit" category="Java Virtual Machine, GC, Detailed" label="ZGC Uncommit" description="Uncommitting of memory" thread="true">
  <Field type="ulong" contentType="bytes" name="capacityBefore" label="Capacity Before" />
  <Field type="ulong" contentType="bytes" name="capacityAfter" label="Capacity After" />
  <Field type="ulong" contentType="bytes" name="uncommitted" label="Uncommitted" />
</Event>

@ -721,12 +721,6 @@
    <setting name="threshold">1 ms</setting>
  </event>

  <event name="jdk.ZPageCacheFlush">
    <setting name="enabled">true</setting>
    <setting name="stackTrace">true</setting>
    <setting name="threshold">0 ms</setting>
  </event>

  <event name="jdk.ZRelocationSet">
    <setting name="enabled">true</setting>
    <setting name="threshold">0 ms</setting>

@ -721,12 +721,6 @@
    <setting name="threshold">1 ms</setting>
  </event>

  <event name="jdk.ZPageCacheFlush">
    <setting name="enabled">true</setting>
    <setting name="stackTrace">true</setting>
    <setting name="threshold">0 ms</setting>
  </event>

  <event name="jdk.ZRelocationSet">
    <setting name="enabled">true</setting>
    <setting name="threshold">0 ms</setting>

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -140,7 +140,7 @@ public:
  static void test(void (*function)(ZForwarding*), uint32_t size) {
    // Create page
    const ZVirtualMemory vmem(0, ZPageSizeSmall);
    const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall));
    const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall, true));
    ZPage page(ZPageTypeSmall, vmem, pmem);

    page.reset();

|
||||
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,8 +26,8 @@
|
||||
#include "unittest.hpp"
|
||||
|
||||
TEST(ZPhysicalMemoryTest, copy) {
|
||||
const ZPhysicalMemorySegment seg0(0, 100);
|
||||
const ZPhysicalMemorySegment seg1(200, 100);
|
||||
const ZPhysicalMemorySegment seg0(0, 100, true);
|
||||
const ZPhysicalMemorySegment seg1(200, 100, true);
|
||||
|
||||
ZPhysicalMemory pmem0;
|
||||
pmem0.add_segment(seg0);
|
||||
@ -51,14 +51,14 @@ TEST(ZPhysicalMemoryTest, copy) {
|
||||
EXPECT_EQ(pmem2.segment(1).size(), 100u);
|
||||
}
|
||||
|
||||
TEST(ZPhysicalMemoryTest, segments) {
|
||||
const ZPhysicalMemorySegment seg0(0, 1);
|
||||
const ZPhysicalMemorySegment seg1(1, 1);
|
||||
const ZPhysicalMemorySegment seg2(2, 1);
|
||||
const ZPhysicalMemorySegment seg3(3, 1);
|
||||
const ZPhysicalMemorySegment seg4(4, 1);
|
||||
const ZPhysicalMemorySegment seg5(5, 1);
|
||||
const ZPhysicalMemorySegment seg6(6, 1);
|
||||
TEST(ZPhysicalMemoryTest, add) {
|
||||
const ZPhysicalMemorySegment seg0(0, 1, true);
|
||||
const ZPhysicalMemorySegment seg1(1, 1, true);
|
||||
const ZPhysicalMemorySegment seg2(2, 1, true);
|
||||
const ZPhysicalMemorySegment seg3(3, 1, true);
|
||||
const ZPhysicalMemorySegment seg4(4, 1, true);
|
||||
const ZPhysicalMemorySegment seg5(5, 1, true);
|
||||
const ZPhysicalMemorySegment seg6(6, 1, true);
|
||||
|
||||
ZPhysicalMemory pmem0;
|
||||
EXPECT_EQ(pmem0.nsegments(), 0u);
|
||||
@ -113,12 +113,28 @@ TEST(ZPhysicalMemoryTest, segments) {
|
||||
EXPECT_EQ(pmem4.is_null(), false);
|
||||
}
|
||||
|
||||
TEST(ZPhysicalMemoryTest, remove) {
|
||||
ZPhysicalMemory pmem;
|
||||
|
||||
pmem.add_segment(ZPhysicalMemorySegment(10, 10, true));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(30, 10, true));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(50, 10, true));
|
||||
EXPECT_EQ(pmem.nsegments(), 3u);
|
||||
EXPECT_EQ(pmem.size(), 30u);
|
||||
EXPECT_FALSE(pmem.is_null());
|
||||
|
||||
pmem.remove_segments();
|
||||
EXPECT_EQ(pmem.nsegments(), 0u);
|
||||
EXPECT_EQ(pmem.size(), 0u);
|
||||
EXPECT_TRUE(pmem.is_null());
|
||||
}
|
||||
|
||||
TEST(ZPhysicalMemoryTest, split) {
|
||||
ZPhysicalMemory pmem;
|
||||
|
||||
pmem.add_segment(ZPhysicalMemorySegment(0, 10));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(10, 10));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(30, 10));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(0, 10, true));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(10, 10, true));
|
||||
pmem.add_segment(ZPhysicalMemorySegment(30, 10, true));
|
||||
EXPECT_EQ(pmem.nsegments(), 2u);
|
||||
EXPECT_EQ(pmem.size(), 30u);
|
||||
|
||||
@ -140,3 +156,19 @@ TEST(ZPhysicalMemoryTest, split) {
|
||||
EXPECT_EQ(pmem.nsegments(), 0u);
|
||||
EXPECT_EQ(pmem.size(), 0u);
|
||||
}
|
||||
|
||||
TEST(ZPhysicalMemoryTest, split_committed) {
|
||||
ZPhysicalMemory pmem0;
|
||||
pmem0.add_segment(ZPhysicalMemorySegment(0, 10, true));
|
||||
pmem0.add_segment(ZPhysicalMemorySegment(10, 10, false));
|
||||
pmem0.add_segment(ZPhysicalMemorySegment(20, 10, true));
|
||||
pmem0.add_segment(ZPhysicalMemorySegment(30, 10, false));
|
||||
EXPECT_EQ(pmem0.nsegments(), 4u);
|
||||
EXPECT_EQ(pmem0.size(), 40u);
|
||||
|
||||
ZPhysicalMemory pmem1 = pmem0.split_committed();
|
||||
EXPECT_EQ(pmem0.nsegments(), 2u);
|
||||
EXPECT_EQ(pmem0.size(), 20u);
|
||||
EXPECT_EQ(pmem1.nsegments(), 2u);
|
||||
EXPECT_EQ(pmem1.size(), 20u);
|
||||
}
|
||||
|
@ -25,16 +25,11 @@ package gc.z;

/*
 * @test TestUncommit
 * @requires vm.gc.Z & !vm.graal.enabled & vm.compMode != "Xcomp"
 * @requires vm.gc.Z & !vm.graal.enabled
 * @summary Test ZGC uncommit unused memory
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 2
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1
 */

/*
 * This test is disabled when running with -Xcomp, since it seems to affect
 * the timing of the test, causing memory to appear to be uncommitted too fast.
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 2
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1
 */

import java.util.ArrayList;
@ -98,8 +93,7 @@ public class TestUncommit {
        // Verify
        if (enabled) {
            if (beforeUncommit == beforeAlloc) {
                // Temporarily disabled pending JDK-8245208
                // throw new Exception("Uncommitted too fast");
                throw new Exception("Uncommitted too fast");
            }

            if (afterUncommit >= afterAlloc) {

@ -1,69 +0,0 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.jfr.event.gc.detailed;

import java.util.List;

import static gc.testlibrary.Allocation.blackHole;
import jdk.jfr.Recording;
import jdk.jfr.consumer.RecordedEvent;
import jdk.test.lib.jfr.EventNames;
import jdk.test.lib.jfr.Events;

/**
 * @test TestZPageCacheFlushEvent
 * @requires vm.hasJFR & vm.gc.Z
 * @key jfr
 * @library /test/lib /test/jdk /test/hotspot/jtreg
 * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZPageCacheFlushEvent
 */

public class TestZPageCacheFlushEvent {
    public static void main(String[] args) throws Exception {
        try (Recording recording = new Recording()) {
            // Activate the event we are interested in and start recording
            recording.enable(EventNames.ZPageCacheFlush);
            recording.start();

            // Allocate non-large objects, to fill page cache with non-large pages
            for (int i = 0; i < 128; i++) {
                blackHole(new byte[256 * 1024]);
            }

            // Allocate large objects, to provoke page cache flushing
            for (int i = 0; i < 10; i++) {
                blackHole(new byte[7 * 1024 * 1024]);
            }

            recording.stop();

            // Verify recording
            List<RecordedEvent> events = Events.fromRecording(recording);
            System.out.println("Events: " + events.size());
            Events.hasEvents(events);
        }
    }
}

@ -63,12 +63,6 @@ public class TestZUncommitEvent {
            List<RecordedEvent> events = Events.fromRecording(recording);
            System.out.println("Events: " + events.size());
            Events.hasEvents(events);
            for (RecordedEvent event : Events.fromRecording(recording)) {
                System.out.println("Event:" + event);
                final long capacityBefore = Events.assertField(event, "capacityBefore").getValue();
                final long capacityAfter = Events.assertField(event, "capacityAfter").below(capacityBefore).getValue();
                Events.assertField(event, "uncommitted").equal(capacityBefore - capacityAfter);
            }
        }
    }
}

@ -144,7 +144,6 @@ public class EventNames {
    public final static String GCPhaseConcurrentLevel1 = PREFIX + "GCPhaseConcurrentLevel1";
    public final static String ZAllocationStall = PREFIX + "ZAllocationStall";
    public final static String ZPageAllocation = PREFIX + "ZPageAllocation";
    public final static String ZPageCacheFlush = PREFIX + "ZPageCacheFlush";
    public final static String ZRelocationSet = PREFIX + "ZRelocationSet";
    public final static String ZRelocationSetGroup = PREFIX + "ZRelocationSetGroup";
    public final static String ZUncommit = PREFIX + "ZUncommit";