8312132: Add tracking of multiple address spaces in NMT

Co-authored-by: Thomas Stuefe <stuefe@openjdk.org>
Reviewed-by: stefank, stuefe

parent d0052c032c
commit 3944e67366
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "gc/z/zJNICritical.hpp"
#include "gc/z/zLargePages.hpp"
#include "gc/z/zMarkStackAllocator.hpp"
#include "gc/z/zNMT.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
@@ -46,6 +47,7 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
                      VM_Version::jdk_debug_level());

  // Early initialization
  ZNMT::initialize();
  ZGlobalsPointers::initialize();
  ZNUMA::initialize();
  ZCPU::initialize();
@@ -28,92 +28,31 @@
#include "gc/z/zVirtualMemory.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "utilities/nativeCallStack.hpp"

ZNMT::Reservation ZNMT::_reservations[ZMaxVirtualReservations] = {};
size_t ZNMT::_num_reservations = 0;
MemoryFileTracker::MemoryFile* ZNMT::_device = nullptr;

size_t ZNMT::reservation_index(zoffset offset, size_t* offset_in_reservation) {
  assert(_num_reservations > 0, "at least one reservation must exist");

  size_t index = 0;
  *offset_in_reservation = untype(offset);
  for (; index < _num_reservations; ++index) {
    const size_t reservation_size = _reservations[index]._size;
    if (*offset_in_reservation < reservation_size) {
      break;
    }
    *offset_in_reservation -= reservation_size;
  }

  assert(index != _num_reservations, "failed to find reservation index");
  return index;
}

void ZNMT::process_fake_mapping(zoffset offset, size_t size, bool commit) {
  // In order to satisfy NMT's requirement of a 1:1 mapping between committed
  // and reserved addresses, a fake mapping from the offset into the reservation
  // is used.
  //
  // These mappings from
  //   [offset, offset + size) -> {[virtual address range], ...}
  // are stable after the heap has been reserved. No commits precede any
  // reservations. Committing and uncommitting the same [offset, offset + size)
  // range will result in the same virtual memory ranges.

  size_t left_to_process = size;
  size_t offset_in_reservation;
  for (size_t i = reservation_index(offset, &offset_in_reservation); i < _num_reservations; ++i) {
    const zaddress_unsafe reservation_start = _reservations[i]._start;
    const size_t reservation_size = _reservations[i]._size;
    const size_t sub_range_size = MIN2(left_to_process, reservation_size - offset_in_reservation);
    const uintptr_t sub_range_addr = untype(reservation_start) + offset_in_reservation;

    // commit / uncommit memory
    if (commit) {
      MemTracker::record_virtual_memory_commit((void*)sub_range_addr, sub_range_size, CALLER_PC);
    } else {
      ThreadCritical tc;
      MemTracker::record_virtual_memory_uncommit((address)sub_range_addr, sub_range_size);
    }

    left_to_process -= sub_range_size;
    if (left_to_process == 0) {
      // Processed all nmt registrations
      return;
    }

    offset_in_reservation = 0;
  }

  assert(left_to_process == 0, "everything was not committed");
void ZNMT::initialize() {
  _device = MemTracker::register_file("ZGC heap backing file");
}

void ZNMT::reserve(zaddress_unsafe start, size_t size) {
  assert(_num_reservations < ZMaxVirtualReservations, "too many reservations");
  // Keep track of the reservations made in order to create fake mappings
  // between the reserved and committed memory.
  // See details in ZNMT::process_fake_mapping
  _reservations[_num_reservations++] = {start, size};

  MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC, mtJavaHeap);
  MemTracker::record_virtual_memory_reserve((address)untype(start), size, CALLER_PC, mtJavaHeap);
}

void ZNMT::commit(zoffset offset, size_t size) {
  // NMT expects a 1-to-1 mapping between virtual and physical memory.
  // ZGC can temporarily have multiple virtual addresses pointing to
  // the same physical memory.
  //
  // When this function is called we don't know where in the virtual memory
  // this physical memory will be mapped. So we fake the virtual memory
  // address by mapping the physical offset into offsets in the reserved
  // memory space.
  process_fake_mapping(offset, size, true);
  MemTracker::allocate_memory_in(ZNMT::_device, untype(offset), size, CALLER_PC, mtJavaHeap);
}

void ZNMT::uncommit(zoffset offset, size_t size) {
  // We fake the virtual memory address by mapping the physical offset
  // into offsets in the reserved memory space.
  // See comment in ZNMT::commit
  process_fake_mapping(offset, size, false);
  MemTracker::free_memory_in(ZNMT::_device, untype(offset), size);
}

void ZNMT::map(zaddress_unsafe addr, size_t size, zoffset offset) {
  // NMT doesn't track mappings at the moment.
}

void ZNMT::unmap(zaddress_unsafe addr, size_t size) {
  // NMT doesn't track mappings at the moment.
}
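For illustration, here is a minimal standalone sketch (not part of the patch; the fixture values and names are invented) of the offset-to-reservation walk that the removed process_fake_mapping performed: a physical offset range is translated into addresses inside the recorded reservations, splitting at reservation boundaries.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Reservation { uintptr_t start; size_t size; };

// Hypothetical fixture: two virtual reservations of 100 bytes each.
static const Reservation reservations[] = {{0x100000, 100}, {0x800000, 100}};
static const size_t num_reservations = 2;

// Translate [offset, offset + size) in "physical offset space" into one or
// more virtual sub-ranges, as the fake commit/uncommit mapping did.
static void walk(size_t offset, size_t size) {
  size_t in_reservation = offset;
  size_t i = 0;
  while (in_reservation >= reservations[i].size) { // find the containing reservation
    in_reservation -= reservations[i].size;
    i++;
  }
  for (size_t left = size; i < num_reservations && left > 0; i++) {
    const size_t chunk = std::min(left, reservations[i].size - in_reservation);
    const uintptr_t addr = reservations[i].start + in_reservation;
    std::printf("sub-range [0x%zx, 0x%zx)\n", (size_t)addr, (size_t)(addr + chunk));
    left -= chunk;
    in_reservation = 0; // later reservations are consumed from their start
  }
}

int main() {
  walk(80, 40); // crosses the boundary: 20 bytes in each reservation
  return 0;
}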
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,25 +29,24 @@
#include "gc/z/zMemory.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "memory/allStatic.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/nativeCallStack.hpp"

class ZNMT : public AllStatic {
private:
  struct Reservation {
    zaddress_unsafe _start;
    size_t _size;
  };
  static Reservation _reservations[ZMaxVirtualReservations];
  static size_t _num_reservations;

  static size_t reservation_index(zoffset offset, size_t* offset_in_reservation);
  static void process_fake_mapping(zoffset offset, size_t size, bool commit);
  static MemoryFileTracker::MemoryFile* _device;

public:
  static void initialize();

  static void reserve(zaddress_unsafe start, size_t size);
  static void commit(zoffset offset, size_t size);
  static void uncommit(zoffset offset, size_t size);

  static void map(zaddress_unsafe addr, size_t size, zoffset offset);
  static void unmap(zaddress_unsafe addr, size_t size);
};

#endif // SHARE_GC_Z_ZNMT_HPP
@@ -137,10 +137,14 @@ class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
  }
};

void MemBaseline::baseline_summary() {
  MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
  VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
  {
    MemoryFileTracker::Instance::Locker lock;
    MemoryFileTracker::Instance::summary_snapshot(&_virtual_memory_snapshot);
  }

  _metaspace_stats = MetaspaceUtils::get_combined_statistics();
}

@@ -189,7 +193,6 @@ void MemBaseline::baseline(bool summaryOnly) {
    baseline_allocation_sites();
    _baseline_type = Detail_baselined;
  }

}

int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
@@ -28,6 +28,7 @@
#include "nmt/mallocTracker.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memReporter.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -882,4 +883,13 @@ void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stac
  }

    out->print_cr(")\n");
  }
}

void MemDetailReporter::report_memory_file_allocations() {
  stringStream st;
  {
    MemoryFileTracker::Instance::Locker lock;
    MemoryFileTracker::Instance::print_all_reports_on(&st, scale());
  }
  output()->print_raw(st.freeze());
}
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "nmt/memBaseline.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "oops/instanceKlass.hpp"

/*
 * Base class that provides helpers
@@ -165,6 +164,7 @@ class MemDetailReporter : public MemSummaryReporter {
  virtual void report() {
    MemSummaryReporter::report();
    report_virtual_memory_map();
    report_memory_file_allocations();
    report_detail();
  }

@@ -173,6 +173,8 @@ class MemDetailReporter : public MemSummaryReporter {
  void report_detail();
  // Report virtual memory map
  void report_virtual_memory_map();
  // Report all physical devices
  void report_memory_file_allocations();
  // Report malloc allocation sites; returns number of omitted sites
  int report_malloc_sites();
  // Report virtual memory reservation sites; returns number of omitted sites
@@ -67,6 +67,7 @@ void MemTracker::initialize() {

  if (level > NMT_off) {
    if (!MallocTracker::initialize(level) ||
        !MemoryFileTracker::Instance::initialize(level) ||
        !VirtualMemoryTracker::initialize(level)) {
      assert(false, "NMT initialization failed");
      level = NMT_off;
@@ -27,6 +27,7 @@

#include "nmt/mallocTracker.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/mutexLocker.hpp"
@@ -166,6 +167,39 @@ class MemTracker : AllStatic {
    }
  }

  static inline MemoryFileTracker::MemoryFile* register_file(const char* descriptive_name) {
    assert_post_init();
    if (!enabled()) return nullptr;
    MemoryFileTracker::Instance::Locker lock;
    return MemoryFileTracker::Instance::make_file(descriptive_name);
  }

  static inline void remove_file(MemoryFileTracker::MemoryFile* file) {
    assert_post_init();
    if (!enabled()) return;
    assert(file != nullptr, "must be");
    MemoryFileTracker::Instance::Locker lock;
    MemoryFileTracker::Instance::free_file(file);
  }

  static inline void allocate_memory_in(MemoryFileTracker::MemoryFile* file, size_t offset, size_t size,
                                        const NativeCallStack& stack, MEMFLAGS flag) {
    assert_post_init();
    if (!enabled()) return;
    assert(file != nullptr, "must be");
    MemoryFileTracker::Instance::Locker lock;
    MemoryFileTracker::Instance::allocate_memory(file, offset, size, stack, flag);
  }

  static inline void free_memory_in(MemoryFileTracker::MemoryFile* file,
                                    size_t offset, size_t size) {
    assert_post_init();
    if (!enabled()) return;
    assert(file != nullptr, "must be");
    MemoryFileTracker::Instance::Locker lock;
    MemoryFileTracker::Instance::free_memory(file, offset, size);
  }

  // Given an existing memory mapping registered with NMT and a splitting
  // address, split the mapping in two. The memory region is supposed to
  // be fully uncommitted.
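As a hedged usage sketch (not part of the patch) of the memory-file entry points added above, mirroring how ZGC's ZNMT drives them; the file name is invented, while CALLER_PC and mtJavaHeap are the existing NMT macro and flag:

// Register a file once, account committed ranges against offsets within it,
// then tear it down. register_file returns nullptr when NMT is off.
MemoryFileTracker::MemoryFile* file = MemTracker::register_file("example backing file");
if (file != nullptr) {
  MemTracker::allocate_memory_in(file, /*offset*/ 0, /*size*/ 4096, CALLER_PC, mtJavaHeap);
  MemTracker::free_memory_in(file, /*offset*/ 0, /*size*/ 4096);
  MemTracker::remove_file(file);
}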
src/hotspot/share/nmt/memoryFileTracker.cpp (new file, 200 lines)
@@ -0,0 +1,200 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/vmatree.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"

MemoryFileTracker* MemoryFileTracker::Instance::_tracker = nullptr;
PlatformMutex* MemoryFileTracker::Instance::_mutex = nullptr;

MemoryFileTracker::MemoryFileTracker(bool is_detailed_mode)
  : _stack_storage(is_detailed_mode), _files() {}

void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
                                        size_t size, const NativeCallStack& stack,
                                        MEMFLAGS flag) {
  NativeCallStackStorage::StackIndex sidx = _stack_storage.push(stack);
  VMATree::RegionData regiondata(sidx, flag);
  VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata);
  for (int i = 0; i < mt_number_of_types; i++) {
    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
    summary->reserve_memory(diff.flag[i].commit);
    summary->commit_memory(diff.flag[i].commit);
  }
}

void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) {
  VMATree::SummaryDiff diff = file->_tree.release_mapping(offset, size);
  for (int i = 0; i < mt_number_of_types; i++) {
    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
    summary->reserve_memory(diff.flag[i].commit);
    summary->commit_memory(diff.flag[i].commit);
  }
}

void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* stream, size_t scale) {
  assert(MemTracker::tracking_level() == NMT_detail, "must");

  stream->print_cr("Memory map of %s", file->_descriptive_name);
  stream->cr();
  VMATree::TreapNode* prev = nullptr;
#ifdef ASSERT
  VMATree::TreapNode* broken_start = nullptr;
  VMATree::TreapNode* broken_end = nullptr;
#endif
  file->_tree.visit_in_order([&](VMATree::TreapNode* current) {
    if (prev == nullptr) {
      // Must be first node.
      prev = current;
      return;
    }
#ifdef ASSERT
    if (broken_start != nullptr && prev->val().out.type() != current->val().in.type()) {
      broken_start = prev;
      broken_end = current;
    }
#endif
    if (prev->val().out.type() == VMATree::StateType::Committed) {
      const VMATree::position& start_addr = prev->key();
      const VMATree::position& end_addr = current->key();
      stream->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] allocated " SIZE_FORMAT "%s" " for %s from",
                       start_addr, end_addr,
                       NMTUtil::amount_in_scale(end_addr - start_addr, scale),
                       NMTUtil::scale_name(scale),
                       NMTUtil::flag_to_name(prev->val().out.flag()));
      _stack_storage.get(prev->val().out.stack()).print_on(stream, 4);
      stream->cr();
    }
    prev = current;
  });
#ifdef ASSERT
  if (broken_start != nullptr) {
    tty->print_cr("Broken tree found with first occurrence at nodes %zu, %zu",
                  broken_start->key(), broken_end->key());
    tty->print_cr("Expected start out to have same type as end in, but was: %s, %s",
                  VMATree::statetype_to_string(broken_start->val().out.type()),
                  VMATree::statetype_to_string(broken_end->val().in.type()));
  }
#endif
}

MemoryFileTracker::MemoryFile* MemoryFileTracker::make_file(const char* descriptive_name) {
  MemoryFile* file_place = new MemoryFile{descriptive_name};
  _files.push(file_place);
  return file_place;
}

void MemoryFileTracker::free_file(MemoryFile* file) {
  if (file == nullptr) return;
  _files.remove(file);
  delete file;
}

const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTracker::files() {
  return _files;
}

bool MemoryFileTracker::Instance::initialize(NMT_TrackingLevel tracking_level) {
  if (tracking_level == NMT_TrackingLevel::NMT_off) return true;
  _tracker = static_cast<MemoryFileTracker*>(os::malloc(sizeof(MemoryFileTracker), mtNMT));
  if (_tracker == nullptr) return false;
  new (_tracker) MemoryFileTracker(tracking_level == NMT_TrackingLevel::NMT_detail);
  _mutex = new PlatformMutex();
  return true;
}

void MemoryFileTracker::Instance::allocate_memory(MemoryFile* file, size_t offset,
                                                  size_t size, const NativeCallStack& stack,
                                                  MEMFLAGS flag) {
  _tracker->allocate_memory(file, offset, size, stack, flag);
}

void MemoryFileTracker::Instance::free_memory(MemoryFile* file, size_t offset, size_t size) {
  _tracker->free_memory(file, offset, size);
}

MemoryFileTracker::MemoryFile*
MemoryFileTracker::Instance::make_file(const char* descriptive_name) {
  return _tracker->make_file(descriptive_name);
}

void MemoryFileTracker::Instance::free_file(MemoryFileTracker::MemoryFile* file) {
  return _tracker->free_file(file);
}

void MemoryFileTracker::Instance::print_report_on(const MemoryFile* file,
                                                  outputStream* stream, size_t scale) {
  assert(file != nullptr, "must be");
  assert(stream != nullptr, "must be");
  _tracker->print_report_on(file, stream, scale);
}

void MemoryFileTracker::Instance::print_all_reports_on(outputStream* stream, size_t scale) {
  const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& files =
      MemoryFileTracker::Instance::files();
  stream->cr();
  stream->print_cr("Memory file details");
  stream->cr();
  for (int i = 0; i < files.length(); i++) {
    MemoryFileTracker::MemoryFile* file = files.at(i);
    MemoryFileTracker::Instance::print_report_on(file, stream, scale);
  }
}

const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTracker::Instance::files() {
  return _tracker->files();
};

void MemoryFileTracker::summary_snapshot(VirtualMemorySnapshot* snapshot) const {
  for (int d = 0; d < _files.length(); d++) {
    const MemoryFile* file = _files.at(d);
    for (int i = 0; i < mt_number_of_types; i++) {
      VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_flag(i));
      const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_flag(i));
      // Only account the committed memory.
      snap->commit_memory(current->committed());
    }
  }
}

void MemoryFileTracker::Instance::summary_snapshot(VirtualMemorySnapshot* snapshot) {
  _tracker->summary_snapshot(snapshot);
}

MemoryFileTracker::Instance::Locker::Locker() {
  MemoryFileTracker::Instance::_mutex->lock();
}

MemoryFileTracker::Instance::Locker::~Locker() {
  MemoryFileTracker::Instance::_mutex->unlock();
}
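A hedged sketch (not part of the patch) of driving MemoryFileTracker directly, roughly what the accompanying gtest could do; mtTest and the empty NativeCallStack() constructor are assumed to be available in this context:

// Two allocations in one file, then a summary snapshot.
MemoryFileTracker tracker(/*is_detailed_mode=*/false);
MemoryFileTracker::MemoryFile* file = tracker.make_file("sketch file");
tracker.allocate_memory(file, /*offset*/ 0,   /*size*/ 100, NativeCallStack(), mtTest);
tracker.allocate_memory(file, /*offset*/ 100, /*size*/ 100, NativeCallStack(), mtTest);
VirtualMemorySnapshot snapshot;
tracker.summary_snapshot(&snapshot); // committed memory for mtTest is now 200
tracker.free_file(file);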
src/hotspot/share/nmt/memoryFileTracker.hpp (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_NMT_MEMORYFILETRACKER_HPP
#define SHARE_NMT_MEMORYFILETRACKER_HPP

#include "memory/allocation.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "nmt/vmatree.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"

// The MemoryFileTracker tracks memory of 'memory files',
// storage with its own memory space separate from the process.
// A typical example of such a file is a memory mapped file.
class MemoryFileTracker {
  friend class MemoryFileTrackerTest;

  // Provide caching of stacks.
  NativeCallStackStorage _stack_storage;

public:
  class MemoryFile : public CHeapObj<mtNMT> {
    friend MemoryFileTracker;
    friend class MemoryFileTrackerTest;
    const char* _descriptive_name;
    VirtualMemorySnapshot _summary;
    VMATree _tree;
  public:
    NONCOPYABLE(MemoryFile);
    MemoryFile(const char* descriptive_name)
      : _descriptive_name(descriptive_name) {}
  };

private:
  // We need pointers to each allocated file.
  GrowableArrayCHeap<MemoryFile*, mtNMT> _files;

public:
  MemoryFileTracker(bool is_detailed_mode);

  void allocate_memory(MemoryFile* file, size_t offset, size_t size, const NativeCallStack& stack,
                       MEMFLAGS flag);
  void free_memory(MemoryFile* file, size_t offset, size_t size);

  MemoryFile* make_file(const char* descriptive_name);
  void free_file(MemoryFile* file);

  void summary_snapshot(VirtualMemorySnapshot* snapshot) const;

  // Print detailed report of file
  void print_report_on(const MemoryFile* file, outputStream* stream, size_t scale);

  const GrowableArrayCHeap<MemoryFile*, mtNMT>& files();

  class Instance : public AllStatic {
    static MemoryFileTracker* _tracker;
    static PlatformMutex* _mutex;

  public:
    class Locker : public StackObj {
    public:
      Locker();
      ~Locker();
    };

    static bool initialize(NMT_TrackingLevel tracking_level);

    static MemoryFile* make_file(const char* descriptive_name);
    static void free_file(MemoryFile* device);

    static void allocate_memory(MemoryFile* device, size_t offset, size_t size,
                                const NativeCallStack& stack, MEMFLAGS flag);
    static void free_memory(MemoryFile* device, size_t offset, size_t size);

    static void summary_snapshot(VirtualMemorySnapshot* snapshot);

    static void print_report_on(const MemoryFile* device, outputStream* stream, size_t scale);
    static void print_all_reports_on(outputStream* stream, size_t scale);

    static const GrowableArrayCHeap<MemoryFile*, mtNMT>& files();
  };
};

#endif // SHARE_NMT_MEMORYFILETRACKER_HPP
src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp (new file, 134 lines)
@@ -0,0 +1,134 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP
#define SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP

#include "memory/allocation.hpp"
#include "memory/arena.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/nativeCallStack.hpp"

// Virtual memory regions that are tracked by NMT also have their NativeCallStack (NCS) tracked.
// NCS:s are:
// - Fairly large
// - Regularly compared for equality
// - Read a lot when a detailed report is printed
// Therefore we'd like:
// - To not store duplicates
// - Have fast comparisons
// - Have constant time access
// We achieve this by using a closed hashtable for finding previously existing NCS:s and referring to them by an index that's smaller than a pointer.
class NativeCallStackStorage : public CHeapObj<mtNMT> {
public:
  struct StackIndex {
    friend NativeCallStackStorage;

  private:
    static constexpr const int32_t _invalid = -1;

    int32_t _stack_index;
    StackIndex(int32_t stack_index)
      : _stack_index(stack_index) {
    }

  public:
    static bool equals(const StackIndex& a, const StackIndex& b) {
      return a._stack_index == b._stack_index;
    }

    bool is_invalid() {
      return _stack_index == _invalid;
    }

    StackIndex()
      : _stack_index(_invalid) {
    }
  };

private:
  struct Link : public ArenaObj {
    Link* next;
    StackIndex stack;
    Link(Link* next, StackIndex v)
      : next(next),
        stack(v) {
    }
  };
  StackIndex put(const NativeCallStack& value) {
    int bucket = value.calculate_hash() % _table_size;
    Link* link = _table[bucket];
    while (link != nullptr) {
      if (value.equals(get(link->stack))) {
        return link->stack;
      }
      link = link->next;
    }
    int idx = _stacks.append(value);
    Link* new_link = new (&_arena) Link(_table[bucket], StackIndex(idx));
    _table[bucket] = new_link;
    return new_link->stack;
  }

  // For storage of the Links
  Arena _arena;
  // Pick a prime number of buckets.
  // 4099 gives a 50% probability of collisions at 76 stacks (as per birthday problem).
  static const constexpr int default_table_size = 4099;
  int _table_size;
  Link** _table;
  GrowableArrayCHeap<NativeCallStack, mtNMT> _stacks;
  const bool _is_detailed_mode;

  const NativeCallStack _fake_stack;
public:

  StackIndex push(const NativeCallStack& stack) {
    // Not in detailed mode, so not tracking stacks.
    if (!_is_detailed_mode) {
      return StackIndex();
    }
    return put(stack);
  }

  const inline NativeCallStack& get(StackIndex si) {
    if (si._stack_index == -1) {
      return _fake_stack;
    }
    return _stacks.at(si._stack_index);
  }

  NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size)
    : _arena(mtNMT), _table_size(table_size), _table(nullptr), _stacks(),
      _is_detailed_mode(is_detailed_mode), _fake_stack() {
    if (_is_detailed_mode) {
      _table = NEW_ARENA_ARRAY(&_arena, Link*, _table_size);
      for (int i = 0; i < _table_size; i++) {
        _table[i] = nullptr;
      }
    }
  }
};

#endif // SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP
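A hedged sketch (not part of the patch) of the deduplication contract the comment above describes: pushing an equal stack twice yields equal indices, so later comparisons are cheap int32 compares instead of full NativeCallStack comparisons. The NativeCallStack(0) capture is assumed here.

NativeCallStackStorage storage(/*is_detailed_mode=*/true);
NativeCallStack stack(0); // capture the current call stack
NativeCallStackStorage::StackIndex a = storage.push(stack);
NativeCallStackStorage::StackIndex b = storage.push(stack); // duplicate: no new entry stored
assert(NativeCallStackStorage::StackIndex::equals(a, b), "same stack, same index");
const NativeCallStack& retrieved = storage.get(a); // constant-time lookup of the full stack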
src/hotspot/share/nmt/nmtTreap.hpp (new file, 376 lines)
@@ -0,0 +1,376 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_NMT_NMTTREAP_HPP
#define SHARE_NMT_NMTTREAP_HPP

#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include <stdint.h>

// A Treap is a self-balanced binary tree where each node is equipped with a
// priority. It adds the invariant that the priority of a parent P is strictly
// larger than the priority of its children. When priorities are randomly
// assigned the tree is balanced.
// All operations are defined through merge and split, which are each other's inverse.
// merge(left_treap, right_treap) => treap where left_treap <= right_treap
// split(treap, key) => (left_treap, right_treap) where left_treap <= right_treap
// Recursion is used in these, but the depth of the call stack is the depth of
// the tree which is O(log n) so we are safe from stack overflow.
// TreapNode has LEQ nodes on the left, GT nodes on the right.
//
// COMPARATOR must have a static function `cmp(a,b)` which returns:
//     - an int < 0 when a < b
//     - an int == 0 when a == b
//     - an int > 0 when a > b
// ALLOCATOR must check for oom and exit, as Treap currently does not handle the allocation
// failing.

template<typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
class Treap {
  friend class VMATreeTest;
  friend class TreapTest;
public:
  class TreapNode {
    friend Treap;
    uint64_t _priority;
    const K _key;
    V _value;

    TreapNode* _left;
    TreapNode* _right;

  public:
    TreapNode(const K& k, const V& v, uint64_t p)
      : _priority(p),
        _key(k),
        _value(v),
        _left(nullptr),
        _right(nullptr) {
    }

    const K& key() const { return _key; }
    V& val() { return _value; }

    TreapNode* left() const { return _left; }
    TreapNode* right() const { return _right; }
  };

private:
  ALLOCATOR _allocator;
  TreapNode* _root;
  uint64_t _prng_seed;
  int _node_count;

  uint64_t prng_next() {
    // Taken directly off of JFRPrng
    static const constexpr uint64_t PrngMult = 0x5DEECE66DLL;
    static const constexpr uint64_t PrngAdd = 0xB;
    static const constexpr uint64_t PrngModPower = 48;
    static const constexpr uint64_t PrngModMask = (static_cast<uint64_t>(1) << PrngModPower) - 1;
    _prng_seed = (PrngMult * _prng_seed + PrngAdd) & PrngModMask;
    return _prng_seed;
  }

  struct node_pair {
    TreapNode* left;
    TreapNode* right;
  };

  enum SplitMode {
    LT, // <
    LEQ // <=
  };

  // Split tree at head into two trees, SplitMode decides where EQ values go.
  // We have SplitMode because it makes remove() trivial to implement.
  static node_pair split(TreapNode* head, const K& key, SplitMode mode = LEQ DEBUG_ONLY(COMMA int recur_count = 0)) {
    assert(recur_count < 200, "Call-stack depth should never exceed 200");

    if (head == nullptr) {
      return {nullptr, nullptr};
    }
    if ((COMPARATOR::cmp(head->_key, key) <= 0 && mode == LEQ) || (COMPARATOR::cmp(head->_key, key) < 0 && mode == LT)) {
      node_pair p = split(head->_right, key, mode DEBUG_ONLY(COMMA recur_count + 1));
      head->_right = p.left;
      return node_pair{head, p.right};
    } else {
      node_pair p = split(head->_left, key, mode DEBUG_ONLY(COMMA recur_count + 1));
      head->_left = p.right;
      return node_pair{p.left, head};
    }
  }

  // Invariant: left is a treap whose keys are LEQ to the keys in right.
  static TreapNode* merge(TreapNode* left, TreapNode* right DEBUG_ONLY(COMMA int recur_count = 0)) {
    assert(recur_count < 200, "Call-stack depth should never exceed 200");

    if (left == nullptr) return right;
    if (right == nullptr) return left;

    if (left->_priority > right->_priority) {
      // We need
      //   LEFT
      //      |
      //      RIGHT
      // for the invariant re: priorities to hold.
      left->_right = merge(left->_right, right DEBUG_ONLY(COMMA recur_count + 1));
      return left;
    } else {
      // We need
      //      RIGHT
      //      |
      //   LEFT
      // for the invariant re: priorities to hold.
      right->_left = merge(left, right->_left DEBUG_ONLY(COMMA recur_count + 1));
      return right;
    }
  }

  static TreapNode* find(TreapNode* node, const K& k DEBUG_ONLY(COMMA int recur_count = 0)) {
    if (node == nullptr) {
      return nullptr;
    }

    int key_cmp_k = COMPARATOR::cmp(node->key(), k);

    if (key_cmp_k == 0) { // key EQ k
      return node;
    }

    if (key_cmp_k < 0) { // key LT k
      return find(node->right(), k DEBUG_ONLY(COMMA recur_count + 1));
    } else { // key GT k
      return find(node->left(), k DEBUG_ONLY(COMMA recur_count + 1));
    }
  }

#ifdef ASSERT
  void verify_self() {
    // A balanced binary search tree should have a depth on the order of log(N).
    // We take the ceiling of log_2(N + 1) * 2.5 as our maximum bound.
    // For comparison, a RB-tree has a proven max depth of log_2(N + 1) * 2.
    const int expected_maximum_depth = ceil((log(this->_node_count+1) / log(2)) * 2.5);
    // Find the maximum depth through DFS and ensure that the priority invariant holds.
    int maximum_depth_found = 0;

    struct DFS {
      int depth;
      uint64_t parent_prio;
      TreapNode* n;
    };
    GrowableArrayCHeap<DFS, mtNMT> to_visit;
    constexpr const uint64_t positive_infinity = 0xFFFFFFFFFFFFFFFF;

    to_visit.push({0, positive_infinity, this->_root});
    while (!to_visit.is_empty()) {
      DFS head = to_visit.pop();
      if (head.n == nullptr) continue;
      maximum_depth_found = MAX2(maximum_depth_found, head.depth);

      assert(head.parent_prio >= head.n->_priority, "broken priority invariant");

      to_visit.push({head.depth + 1, head.n->_priority, head.n->left()});
      to_visit.push({head.depth + 1, head.n->_priority, head.n->right()});
    }
    assert(maximum_depth_found - expected_maximum_depth <= 3,
           "depth unexpectedly large for treap of node count %d, was: %d, expected between %d and %d",
           _node_count, maximum_depth_found, expected_maximum_depth - 3, expected_maximum_depth);

    // Visit everything in order, see that the key ordering is monotonically increasing.
    TreapNode* last_seen = nullptr;
    bool failed = false;
    int seen_count = 0;
    this->visit_in_order([&](TreapNode* node) {
      seen_count++;
      if (last_seen == nullptr) {
        last_seen = node;
        return;
      }
      if (COMPARATOR::cmp(last_seen->key(), node->key()) > 0) {
        failed = false;
      }
      last_seen = node;
    });
    assert(seen_count == _node_count, "the number of visited nodes does not match the number of stored nodes");
    assert(!failed, "keys were not monotonically strictly increasing when visiting in order");
  }
#endif // ASSERT

public:
  NONCOPYABLE(Treap);

  Treap(uint64_t seed = static_cast<uint64_t>(os::random())
                        | (static_cast<uint64_t>(os::random()) << 32))
    : _allocator(),
      _root(nullptr),
      _prng_seed(seed),
      _node_count(0) {}

  ~Treap() {
    this->remove_all();
  }

  void upsert(const K& k, const V& v) {
    TreapNode* found = find(_root, k);
    if (found != nullptr) {
      // Already exists, update value.
      found->_value = v;
      return;
    }
    _node_count++;
    // Doesn't exist, make node
    void* node_place = _allocator.allocate(sizeof(TreapNode));
    uint64_t prio = prng_next();
    TreapNode* node = new (node_place) TreapNode(k, v, prio);

    // (LEQ_k, GT_k)
    node_pair split_up = split(this->_root, k);
    // merge(merge(LEQ_k, EQ_k), GT_k)
    this->_root = merge(merge(split_up.left, node), split_up.right);
  }

  void remove(const K& k) {
    // (LEQ_k, GT_k)
    node_pair first_split = split(this->_root, k, LEQ);
    // (LT_k, GEQ_k) == (LT_k, EQ_k) since it's from LEQ_k and keys are unique.
    node_pair second_split = split(first_split.left, k, LT);

    if (second_split.right != nullptr) {
      // The key k existed, we delete it.
      _node_count--;
      _allocator.free(second_split.right);
    }
    // Merge together everything
    _root = merge(second_split.left, first_split.right);
  }

  // Delete all nodes.
  void remove_all() {
    _node_count = 0;
    GrowableArrayCHeap<TreapNode*, mtNMT> to_delete;
    to_delete.push(_root);

    while (!to_delete.is_empty()) {
      TreapNode* head = to_delete.pop();
      if (head == nullptr) continue;
      to_delete.push(head->_left);
      to_delete.push(head->_right);
      _allocator.free(head);
    }
    _root = nullptr;
  }

  TreapNode* closest_leq(const K& key) {
    TreapNode* candidate = nullptr;
    TreapNode* pos = _root;
    while (pos != nullptr) {
      int cmp_r = COMPARATOR::cmp(pos->key(), key);
      if (cmp_r == 0) { // Exact match
        candidate = pos;
        break; // Can't become better than that.
      }
      if (cmp_r < 0) {
        // Found a match, try to find a better one.
        candidate = pos;
        pos = pos->_right;
      } else if (cmp_r > 0) {
        pos = pos->_left;
      }
    }
    return candidate;
  }

  // Visit all TreapNodes in ascending key order.
  template<typename F>
  void visit_in_order(F f) const {
    GrowableArrayCHeap<TreapNode*, mtNMT> to_visit;
    TreapNode* head = _root;
    while (!to_visit.is_empty() || head != nullptr) {
      while (head != nullptr) {
        to_visit.push(head);
        head = head->left();
      }
      head = to_visit.pop();
      f(head);
      head = head->right();
    }
  }

  // Visit all TreapNodes in ascending order whose keys are in range [from, to).
  template<typename F>
  void visit_range_in_order(const K& from, const K& to, F f) {
    assert(COMPARATOR::cmp(from, to) <= 0, "from must be less or equal to to");
    GrowableArrayCHeap<TreapNode*, mtNMT> to_visit;
    TreapNode* head = _root;
    while (!to_visit.is_empty() || head != nullptr) {
      while (head != nullptr) {
        int cmp_from = COMPARATOR::cmp(head->key(), from);
        to_visit.push(head);
        if (cmp_from >= 0) {
          head = head->left();
        } else {
          // We've reached a node which is strictly less than from
          // We don't need to visit any further to the left.
          break;
        }
      }
      head = to_visit.pop();
      const int cmp_from = COMPARATOR::cmp(head->key(), from);
      const int cmp_to = COMPARATOR::cmp(head->key(), to);
      if (cmp_from >= 0 && cmp_to < 0) {
        f(head);
      }
      if (cmp_to < 0) {
        head = head->right();
      } else {
        head = nullptr;
      }
    }
  }
};

class TreapCHeapAllocator {
public:
  void* allocate(size_t sz) {
    void* allocation = os::malloc(sz, mtNMT);
    if (allocation == nullptr) {
      vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "treap failed allocation");
    }
    return allocation;
  }

  void free(void* ptr) {
    os::free(ptr);
  }
};

template<typename K, typename V, typename COMPARATOR>
using TreapCHeap = Treap<K, V, COMPARATOR, TreapCHeapAllocator>;

#endif //SHARE_NMT_NMTTREAP_HPP
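A hedged usage sketch (not part of the patch): a TreapCHeap keyed by int, exercising upsert, closest_leq, remove and in-order traversal. The comparator follows the contract documented in the header above; all names here are invented for illustration.

struct IntComparator {
  static int cmp(int a, int b) { return a < b ? -1 : (a == b ? 0 : 1); }
};

void treap_usage_sketch() {
  TreapCHeap<int, const char*, IntComparator> treap;
  treap.upsert(10, "ten");
  treap.upsert(30, "thirty");
  treap.upsert(10, "ten, updated");     // same key: the value is replaced
  // The largest key <= 20 is 10.
  auto* leq = treap.closest_leq(20);
  assert(leq != nullptr && leq->key() == 10, "must find 10");
  treap.visit_in_order([](auto* node) { // visits key 10, then key 30
    // node->key() and node->val() are available here
  });
  treap.remove(10);
}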
@@ -30,6 +30,7 @@
#include "memory/metaspaceStats.hpp"
#include "nmt/allocationSite.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/atomic.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"
src/hotspot/share/nmt/vmatree.cpp (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Red Hat Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "nmt/vmatree.hpp"
#include "utilities/growableArray.hpp"

const VMATree::RegionData VMATree::empty_regiondata{NativeCallStackStorage::StackIndex{}, mtNone};

const char* VMATree::statetype_strings[3] = {
  "reserved", "committed", "released",
};

VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType state,
                                               const RegionData& metadata) {
  if (A == B) {
    // A 0-sized mapping isn't worth recording.
    return SummaryDiff();
  }

  IntervalChange stA{
    IntervalState{StateType::Released, empty_regiondata},
    IntervalState{ state, metadata}
  };
  IntervalChange stB{
    IntervalState{ state, metadata},
    IntervalState{StateType::Released, empty_regiondata}
  };

  // First handle A.
  // Find closest node that is LEQ A
  bool LEQ_A_found = false;
  AddressState LEQ_A;
  TreapNode* leqA_n = _tree.closest_leq(A);
  if (leqA_n == nullptr) {
    // No match. We add the A node directly, unless it would have no effect.
    if (!stA.is_noop()) {
      _tree.upsert(A, stA);
    }
  } else {
    LEQ_A_found = true;
    LEQ_A = AddressState{leqA_n->key(), leqA_n->val()};
    // Unless we know better, let B's outgoing state be the outgoing state of the node at or preceding A.
    // Consider the case where the found node is the start of a region enclosing [A,B)
    stB.out = leqA_n->val().out;

    // Direct address match.
    if (leqA_n->key() == A) {
      // Take over in state from old address.
      stA.in = leqA_n->val().in;

      // We may now be able to merge two regions:
      // If the node's old state matches the new, it becomes a noop. That happens, for example,
      // when expanding a committed area: commit [x1, A); ... commit [A, x3)
      // and the result should be a larger area, [x1, x3). In that case, the middle node (A and le_n)
      // is not needed anymore. So we just remove the old node.
      stB.in = stA.out;
      if (stA.is_noop()) {
        // invalidates leqA_n
        _tree.remove(leqA_n->key());
      } else {
        // If the state is not matching then we have different operations, such as:
        // reserve [x1, A); ... commit [A, x2); or
        // reserve [x1, A), flag1; ... reserve [A, x2), flag2; or
        // reserve [A, x1), flag1; ... reserve [A, x2), flag2;
        // then we re-use the existing out node, overwriting its old metadata.
        leqA_n->val() = stA;
      }
    } else {
      // The address must be smaller.
      assert(A > leqA_n->key(), "must be");

      // We add a new node, but only if there would be a state change. If there would not be a
      // state change, we just omit the node.
      // That happens, for example, when reserving within an already reserved region with identical metadata.
      stA.in = leqA_n->val().out; // .. and the region's prior state is the incoming state
      if (stA.is_noop()) {
        // Nothing to do.
      } else {
        // Add new node.
        _tree.upsert(A, stA);
      }
    }
  }

  // Now we handle B.
  // We first search all nodes that are (A, B]. All of these nodes
  // need to be deleted and summary accounted for. The last node before B determines B's outgoing state.
  // If there is no node between A and B, it's A's incoming state.
  GrowableArrayCHeap<AddressState, mtNMT> to_be_deleted_inbetween_a_b;
  bool B_needs_insert = true;

  // Find all nodes between (A, B] and record their addresses and values. Also update B's
  // outgoing state.
  _tree.visit_range_in_order(A + 1, B + 1, [&](TreapNode* head) {
    int cmp_B = PositionComparator::cmp(head->key(), B);
    stB.out = head->val().out;
    if (cmp_B < 0) {
      // Record all nodes preceding B.
      to_be_deleted_inbetween_a_b.push({head->key(), head->val()});
    } else if (cmp_B == 0) {
      // Re-purpose B node, unless it would result in a noop node, in
      // which case record old node at B for deletion and summary accounting.
      if (stB.is_noop()) {
        to_be_deleted_inbetween_a_b.push(AddressState{B, head->val()});
      } else {
        head->val() = stB;
      }
      B_needs_insert = false;
    }
  });

  // Insert B node if needed
  if (B_needs_insert && // Was not already inserted
      !stB.is_noop())   // The operation is differing
  {
    _tree.upsert(B, stB);
  }

  // We now need to:
  // a) Delete all nodes between (A, B]. Including B in the case of a noop.
  // b) Perform summary accounting
  SummaryDiff diff;

  if (to_be_deleted_inbetween_a_b.length() == 0 && LEQ_A_found) {
    // We must have smashed a hole in an existing region (or replaced it entirely).
    // LEQ_A < A < B <= C
    SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(LEQ_A.out().flag())];
    if (LEQ_A.out().type() == StateType::Reserved) {
      rescom.reserve -= B - A;
    } else if (LEQ_A.out().type() == StateType::Committed) {
      rescom.commit -= B - A;
      rescom.reserve -= B - A;
    }
  }

  // Track the previous node.
  AddressState prev{A, stA};
  for (int i = 0; i < to_be_deleted_inbetween_a_b.length(); i++) {
    const AddressState delete_me = to_be_deleted_inbetween_a_b.at(i);
    _tree.remove(delete_me.address);

    // Perform summary accounting
    SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(delete_me.in().flag())];
    if (delete_me.in().type() == StateType::Reserved) {
      rescom.reserve -= delete_me.address - prev.address;
    } else if (delete_me.in().type() == StateType::Committed) {
      rescom.commit -= delete_me.address - prev.address;
      rescom.reserve -= delete_me.address - prev.address;
    }
    prev = delete_me;
  }

  if (prev.address != A && prev.out().type() != StateType::Released) {
    // The last node wasn't released, so it must be connected to a node outside of (A, B)
    // A - prev - B - (some node >= B)
    // It might be that prev.address == B == (some node >= B), this is fine.
    if (prev.out().type() == StateType::Reserved) {
      SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
      rescom.reserve -= B - prev.address;
    } else if (prev.out().type() == StateType::Committed) {
      SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
      rescom.commit -= B - prev.address;
      rescom.reserve -= B - prev.address;
    }
  }

  // Finally, we can register the new region [A, B)'s summary data.
  SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(metadata.flag)];
  if (state == StateType::Reserved) {
    rescom.reserve += B - A;
  } else if (state == StateType::Committed) {
    rescom.commit += B - A;
    rescom.reserve += B - A;
  }
  return diff;
}
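A hedged worked example (not part of the patch) of the summary accounting above, using the public reserve/commit wrappers; mtGC and the default StackIndex are existing names, the scenario is invented. Reserving [0, 100) and then committing [20, 30) inside it yields:

// d1: reserve(mtGC) == +100
// d2: reserve(mtGC) == -10 + 10 == 0, commit(mtGC) == +10,
// because the commit first subtracts the enclosed reserved span
// and then re-adds it as reserved + committed.
VMATree tree;
VMATree::RegionData data(NativeCallStackStorage::StackIndex(), mtGC);
VMATree::SummaryDiff d1 = tree.reserve_mapping(/*from*/ 0,  /*sz*/ 100, data);
VMATree::SummaryDiff d2 = tree.commit_mapping(/*from*/ 20, /*sz*/ 10,  data);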
src/hotspot/share/nmt/vmatree.hpp (new file, 191 lines)
@@ -0,0 +1,191 @@
/*
|
||||
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_NMT_VMATREE_HPP
#define SHARE_NMT_VMATREE_HPP

#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/nmtTreap.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"

#include <cstdint>

// A VMATree stores a sequence of points on the natural number line.
// Each of these points stores information about a state change.
// For example, the state may go from released memory to committed memory,
// or from committed memory of a certain MEMFLAGS to committed memory of a different MEMFLAGS.
// The set of points is stored in a balanced binary tree for efficient querying and updating.
class VMATree {
  friend class VMATreeTest;
  // A position in memory.
public:
  using position = size_t;

  class PositionComparator {
  public:
    static int cmp(position a, position b) {
      if (a < b) return -1;
      if (a == b) return 0;
      if (a > b) return 1;
      ShouldNotReachHere();
    }
  };

  enum class StateType : uint8_t { Reserved, Committed, Released, LAST };

private:
  static const char* statetype_strings[static_cast<uint8_t>(StateType::LAST)];

public:
  NONCOPYABLE(VMATree);

  static const char* statetype_to_string(StateType type) {
    assert(type != StateType::LAST, "must be");
    return statetype_strings[static_cast<uint8_t>(type)];
  }

  // Each point has some stack and a flag associated with it.
  struct RegionData {
    const NativeCallStackStorage::StackIndex stack_idx;
    const MEMFLAGS flag;

    RegionData() : stack_idx(), flag(mtNone) {}

    RegionData(NativeCallStackStorage::StackIndex stack_idx, MEMFLAGS flag)
      : stack_idx(stack_idx), flag(flag) {}

    static bool equals(const RegionData& a, const RegionData& b) {
      return a.flag == b.flag &&
             NativeCallStackStorage::StackIndex::equals(a.stack_idx, b.stack_idx);
    }
  };

  static const RegionData empty_regiondata;

private:
  struct IntervalState {
  private:
    // Store the type and flag as two bytes
    uint8_t type_flag[2];
    NativeCallStackStorage::StackIndex sidx;

  public:
    IntervalState() : type_flag{0, 0}, sidx() {}
    IntervalState(const StateType type, const RegionData data) {
      assert(!(type == StateType::Released) || data.flag == mtNone, "Released type must have flag mtNone");
      type_flag[0] = static_cast<uint8_t>(type);
      type_flag[1] = static_cast<uint8_t>(data.flag);
      sidx = data.stack_idx;
    }

    StateType type() const {
      return static_cast<StateType>(type_flag[0]);
    }

    MEMFLAGS flag() const {
      return static_cast<MEMFLAGS>(type_flag[1]);
    }

    RegionData regiondata() const {
      return RegionData{sidx, flag()};
    }

    const NativeCallStackStorage::StackIndex stack() const {
      return sidx;
    }
  };

  // An IntervalChange indicates a change in state between two intervals. The incoming state
  // is denoted by in, and the outgoing state is denoted by out.
  struct IntervalChange {
    IntervalState in;
    IntervalState out;

    bool is_noop() {
      return in.type() == out.type() &&
             RegionData::equals(in.regiondata(), out.regiondata());
    }
  };

public:
  using VMATreap = TreapCHeap<position, IntervalChange, PositionComparator>;
  using TreapNode = VMATreap::TreapNode;

private:
  VMATreap _tree;

  // AddressState saves the necessary information for performing online summary accounting.
  struct AddressState {
    position address;
    IntervalChange state;

    const IntervalState& out() const {
      return state.out;
    }

    const IntervalState& in() const {
      return state.in;
    }
  };

public:
  VMATree() : _tree() {}

  struct SingleDiff {
    using delta = int64_t;
    delta reserve;
    delta commit;
  };
  struct SummaryDiff {
    SingleDiff flag[mt_number_of_types];
    SummaryDiff() {
      for (int i = 0; i < mt_number_of_types; i++) {
        flag[i] = SingleDiff{0, 0};
      }
    }
  };

  SummaryDiff register_mapping(position A, position B, StateType state, const RegionData& metadata);

  SummaryDiff reserve_mapping(position from, position sz, const RegionData& metadata) {
    return register_mapping(from, from + sz, StateType::Reserved, metadata);
  }

  SummaryDiff commit_mapping(position from, position sz, const RegionData& metadata) {
    return register_mapping(from, from + sz, StateType::Committed, metadata);
  }

  SummaryDiff release_mapping(position from, position sz) {
    return register_mapping(from, from + sz, StateType::Released, VMATree::empty_regiondata);
  }

public:
  template<typename F>
  void visit_in_order(F f) const {
    _tree.visit_in_order(f);
  }
};

#endif // SHARE_NMT_VMATREE_HPP
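
A point is stored only where the state actually changes, so adjacent mappings with identical metadata need no boundary between them. The following standalone sketch illustrates that representation with std::map standing in for the treap; it is an illustration of the idea only, not HotSpot code:

#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

// Toy model of the VMATree representation: each point maps an address to
// the state on its left ("in") and on its right ("out"). A point whose in
// and out states are equal carries no information and can be erased, which
// is exactly how adjacent matching mappings end up merged.
enum class State { Released, Reserved, Committed };

int main() {
  std::map<uint64_t, std::pair<State, State>> points; // addr -> {in, out}

  // Reserve [0, 100): one point opens the region, one closes it.
  points[0]   = {State::Released, State::Reserved};
  points[100] = {State::Reserved, State::Released};

  // Reserve the adjacent [100, 200) with identical properties: the
  // boundary at 100 becomes a no-op (in == out) and is removed.
  points[100] = {State::Reserved, State::Reserved};
  if (points[100].first == points[100].second) {
    points.erase(100);
  }
  points[200] = {State::Reserved, State::Released};

  std::cout << points.size() << " points\n"; // prints "2 points"
  return 0;
}

The real tree performs the same no-op elimination via IntervalChange::is_noop(), which is why the adjacency tests further down expect exactly two nodes for ten touching reservations.
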
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

53 test/hotspot/gtest/nmt/test_nmt_memoryfiletracker.cpp Normal file
@ -0,0 +1,53 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "nmt/memTracker.hpp"
#include "unittest.hpp"

class MemoryFileTrackerTest : public testing::Test {
public:
  size_t sz(int x) { return (size_t) x; }
  void basics() {
    MemoryFileTracker tracker(false);
    MemoryFileTracker::MemoryFile* file = tracker.make_file("test");
    tracker.allocate_memory(file, 0, 100, CALLER_PC, mtTest);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(100));
    tracker.allocate_memory(file, 100, 100, CALLER_PC, mtTest);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(200));
    tracker.allocate_memory(file, 200, 100, CALLER_PC, mtTest);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(300));
    tracker.free_memory(file, 0, 300);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(0));
    tracker.allocate_memory(file, 0, 100, CALLER_PC, mtTest);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(100));
    tracker.free_memory(file, 50, 10);
    EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(90));
  }
};

TEST_VM_F(MemoryFileTrackerTest, Basics) {
  this->basics();
}

63 test/hotspot/gtest/nmt/test_nmt_nativecallstackstorage.cpp Normal file
@ -0,0 +1,63 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"

using NCSS = NativeCallStackStorage;

class NativeCallStackStorageTest : public testing::Test {};

TEST_VM_F(NativeCallStackStorageTest, DoNotStoreStackIfNotDetailed) {
  NativeCallStack ncs{};
  NCSS ncss(false);
  NCSS::StackIndex si = ncss.push(ncs);
  EXPECT_TRUE(si.is_invalid());
  NativeCallStack ncs_received = ncss.get(si);
  EXPECT_TRUE(ncs_received.is_empty());
}

TEST_VM_F(NativeCallStackStorageTest, CollisionsReceiveDifferentIndexes) {
  constexpr const int nr_of_stacks = 10;
  NativeCallStack ncs_arr[nr_of_stacks];
  for (int i = 0; i < nr_of_stacks; i++) {
    ncs_arr[i] = NativeCallStack((address*)(&i), 1);
  }

  NCSS ncss(true, 1);
  NCSS::StackIndex si_arr[nr_of_stacks];
  for (int i = 0; i < nr_of_stacks; i++) {
    si_arr[i] = ncss.push(ncs_arr[i]);
  }

  // Every SI should be different as every stack is different
  for (int i = 0; i < nr_of_stacks; i++) {
    for (int j = 0; j < nr_of_stacks; j++) {
      if (i == j) continue;
      EXPECT_FALSE(NCSS::StackIndex::equals(si_arr[i], si_arr[j]));
    }
  }
}
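
The collision test above constructs the storage as NCSS(true, 1), where the second argument is presumably the internal table size, forcing every stack into a single bucket. The expectation that colliding-but-distinct stacks still receive distinct indexes can be illustrated with a toy chained store (a sketch under that assumption, not HotSpot's implementation):

#include <cstddef>
#include <iostream>
#include <vector>

// Toy model: one bucket chaining every entry. Equal values deduplicate to
// the existing index; distinct values that merely collide are appended and
// therefore each get a fresh index.
static std::vector<int> bucket;

static size_t push(int value) {
  for (size_t i = 0; i < bucket.size(); i++) {
    if (bucket[i] == value) {
      return i; // same value: reuse the stored index
    }
  }
  bucket.push_back(value); // collision with a different value: new index
  return bucket.size() - 1;
}

int main() {
  std::cout << push(10) << ' ' << push(20) << ' ' << push(10) << '\n';
  // prints "0 1 0": distinct values get distinct indexes, repeats dedupe
  return 0;
}
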
326 test/hotspot/gtest/nmt/test_nmt_treap.cpp Normal file
@ -0,0 +1,326 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/nmtTreap.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"

class TreapTest : public testing::Test {
public:
  struct Cmp {
    static int cmp(int a, int b) {
      return a - b;
    }
  };

  struct CmpInverse {
    static int cmp(int a, int b) {
      return b - a;
    }
  };

  struct FCmp {
    static int cmp(float a, float b) {
      if (a < b) return -1;
      if (a == b) return 0;
      return 1;
    }
  };

#ifdef ASSERT
  template<typename K, typename V, typename CMP, typename ALLOC>
  void verify_it(Treap<K, V, CMP, ALLOC>& t) {
    t.verify_self();
  }
#endif // ASSERT

public:
  void inserting_duplicates_results_in_one_value() {
    constexpr const int up_to = 10;
    GrowableArrayCHeap<int, mtTest> nums_seen(up_to, up_to, 0);
    TreapCHeap<int, int, Cmp> treap;

    for (int i = 0; i < up_to; i++) {
      treap.upsert(i, i);
      treap.upsert(i, i);
      treap.upsert(i, i);
      treap.upsert(i, i);
      treap.upsert(i, i);
    }

    treap.visit_in_order([&](TreapCHeap<int, int, Cmp>::TreapNode* node) {
      nums_seen.at(node->key())++;
    });
    for (int i = 0; i < up_to; i++) {
      EXPECT_EQ(1, nums_seen.at(i));
    }
  }

  void treap_ought_not_leak() {
    struct LeakCheckedAllocator {
      int allocations;

      LeakCheckedAllocator()
        : allocations(0) {
      }

      void* allocate(size_t sz) {
        void* allocation = os::malloc(sz, mtTest);
        if (allocation == nullptr) {
          vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "treap failed allocation");
        }
        ++allocations;
        return allocation;
      }

      void free(void* ptr) {
        --allocations;
        os::free(ptr);
      }
    };

    constexpr const int up_to = 10;
    {
      Treap<int, int, Cmp, LeakCheckedAllocator> treap;
      for (int i = 0; i < up_to; i++) {
        treap.upsert(i, i);
      }
      EXPECT_EQ(up_to, treap._allocator.allocations);
      for (int i = 0; i < up_to; i++) {
        treap.remove(i);
      }
      EXPECT_EQ(0, treap._allocator.allocations);
      EXPECT_EQ(nullptr, treap._root);
    }

    {
      Treap<int, int, Cmp, LeakCheckedAllocator> treap;
      for (int i = 0; i < up_to; i++) {
        treap.upsert(i, i);
      }
      treap.remove_all();
      EXPECT_EQ(0, treap._allocator.allocations);
      EXPECT_EQ(nullptr, treap._root);
    }
  }

  void test_find() {
    struct Empty {};
    TreapCHeap<float, Empty, FCmp> treap;
    using Node = TreapCHeap<float, Empty, FCmp>::TreapNode;

    auto test = [&](float f) {
      EXPECT_EQ(nullptr, treap.find(treap._root, f));
      treap.upsert(f, Empty{});
      Node* n = treap.find(treap._root, f);
      EXPECT_NE(nullptr, n);
      EXPECT_EQ(f, n->key());
    };

    test(1.0f);
    test(5.0f);
    test(0.0f);
  }
};

TEST_VM_F(TreapTest, InsertingDuplicatesResultsInOneValue) {
  this->inserting_duplicates_results_in_one_value();
}

TEST_VM_F(TreapTest, TreapOughtNotLeak) {
  this->treap_ought_not_leak();
}

TEST_VM_F(TreapTest, TestVisitors) {
  { // Tests with 'default' ordering (ascending)
    TreapCHeap<int, int, Cmp> treap;
    using Node = TreapCHeap<int, int, Cmp>::TreapNode;

    treap.visit_range_in_order(0, 100, [&](Node* x) {
      EXPECT_TRUE(false) << "Empty treap has no nodes to visit";
    });

    // Single-element set
    treap.upsert(1, 0);
    int count = 0;
    treap.visit_range_in_order(0, 100, [&](Node* x) {
      count++;
    });
    EXPECT_EQ(1, count);

    count = 0;
    treap.visit_in_order([&](Node* x) {
      count++;
    });
    EXPECT_EQ(1, count);

    // Add an element outside of the range that should not be visited on the right side and
    // one on the left side.
    treap.upsert(101, 0);
    treap.upsert(-1, 0);
    count = 0;
    treap.visit_range_in_order(0, 100, [&](Node* x) {
      count++;
    });
    EXPECT_EQ(1, count);

    count = 0;
    treap.visit_in_order([&](Node* x) {
      count++;
    });
    EXPECT_EQ(3, count);

    // Visiting empty range [0, 0) == {}
    treap.upsert(0, 0); // This node should not be visited.
    treap.visit_range_in_order(0, 0, [&](Node* x) {
      EXPECT_TRUE(false) << "Empty visiting range should not visit any node";
    });

    treap.remove_all();
    for (int i = 0; i < 11; i++) {
      treap.upsert(i, 0);
    }

    ResourceMark rm;
    GrowableArray<int> seen;
    treap.visit_range_in_order(0, 10, [&](Node* x) {
      seen.push(x->key());
    });
    EXPECT_EQ(10, seen.length());
    for (int i = 0; i < 10; i++) {
      EXPECT_EQ(i, seen.at(i));
    }

    seen.clear();
    treap.visit_in_order([&](Node* x) {
      seen.push(x->key());
    });
    EXPECT_EQ(11, seen.length());
    for (int i = 0; i < 10; i++) {
      EXPECT_EQ(i, seen.at(i));
    }

    seen.clear();
    treap.visit_range_in_order(10, 12, [&](Node* x) {
      seen.push(x->key());
    });
    EXPECT_EQ(1, seen.length());
    EXPECT_EQ(10, seen.at(0));
  }
  { // Test with descending ordering
    TreapCHeap<int, int, CmpInverse> treap;
    using Node = TreapCHeap<int, int, CmpInverse>::TreapNode;

    for (int i = 0; i < 10; i++) {
      treap.upsert(i, 0);
    }
    ResourceMark rm;
    GrowableArray<int> seen;
    treap.visit_range_in_order(9, -1, [&](Node* x) {
      seen.push(x->key());
    });
    EXPECT_EQ(10, seen.length());
    for (int i = 0; i < 10; i++) {
      EXPECT_EQ(10 - i - 1, seen.at(i));
    }
    seen.clear();

    treap.visit_in_order([&](Node* x) {
      seen.push(x->key());
    });
    EXPECT_EQ(10, seen.length());
    for (int i = 0; i < 10; i++) {
      EXPECT_EQ(10 - i - 1, seen.at(i));
    }
  }
}

TEST_VM_F(TreapTest, TestFind) {
  test_find();
}

TEST_VM_F(TreapTest, TestClosestLeq) {
  using Node = TreapCHeap<int, int, Cmp>::TreapNode;
  {
    TreapCHeap<int, int, Cmp> treap;
    Node* n = treap.closest_leq(0);
    EXPECT_EQ(nullptr, n);

    treap.upsert(0, 0);
    n = treap.closest_leq(0);
    EXPECT_EQ(0, n->key());

    treap.upsert(-1, -1);
    n = treap.closest_leq(0);
    EXPECT_EQ(0, n->key());

    treap.upsert(6, 0);
    n = treap.closest_leq(6);
    EXPECT_EQ(6, n->key());

    n = treap.closest_leq(-2);
    EXPECT_EQ(nullptr, n);
  }
}

#ifdef ASSERT

TEST_VM_F(TreapTest, VerifyItThroughStressTest) {
  { // Repeatedly verify a treap of moderate size
    TreapCHeap<int, int, Cmp> treap;
    constexpr const int ten_thousand = 10000;
    for (int i = 0; i < ten_thousand; i++) {
      int r = os::random();
      if (r % 2 == 0) {
        treap.upsert(i, i);
      } else {
        treap.remove(i);
      }
      verify_it(treap);
    }
    for (int i = 0; i < ten_thousand; i++) {
      int r = os::random();
      if (r % 2 == 0) {
        treap.upsert(i, i);
      } else {
        treap.remove(i);
      }
      verify_it(treap);
    }
  }
  { // Make a very large treap and verify at the end
    struct Nothing {};
    TreapCHeap<int, Nothing, Cmp> treap;
    constexpr const int five_million = 5000000;
    for (int i = 0; i < five_million; i++) {
      treap.upsert(i, Nothing());
    }
    verify_it(treap);
  }
}

#endif // ASSERT
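
For context on the structure exercised above: a treap keeps keys in binary-search-tree order and randomly drawn priorities in max-heap order, which is the invariant verify_self() asserts and what bounds operations to O(log n) in expectation. A minimal insert-only sketch of the rotation scheme (an illustration only, not the nmtTreap implementation; it deliberately leaks nodes for brevity):

#include <cstdlib>
#include <initializer_list>
#include <iostream>

// Each node is BST-ordered by key and max-heap-ordered by a random priority.
struct Node {
  int key;
  int prio;
  Node* left = nullptr;
  Node* right = nullptr;
  explicit Node(int k) : key(k), prio(std::rand()) {}
};

static Node* rotate_right(Node* n) {
  Node* l = n->left; n->left = l->right; l->right = n; return l;
}

static Node* rotate_left(Node* n) {
  Node* r = n->right; n->right = r->left; r->left = n; return r;
}

// Plain BST insert, followed by a rotation whenever the new child's
// priority beats its parent's, letting high-priority nodes bubble up
// and rebalance the tree.
static Node* insert(Node* root, int key) {
  if (root == nullptr) return new Node(key);
  if (key < root->key) {
    root->left = insert(root->left, key);
    if (root->left->prio > root->prio) root = rotate_right(root);
  } else if (key > root->key) {
    root->right = insert(root->right, key);
    if (root->right->prio > root->prio) root = rotate_left(root);
  } // equal key: upsert semantics, nothing to do in this sketch
  return root;
}

static void visit_in_order(Node* n) {
  if (n == nullptr) return;
  visit_in_order(n->left);
  std::cout << n->key << ' ';
  visit_in_order(n->right);
}

int main() {
  Node* root = nullptr;
  for (int k : {5, 1, 9, 3, 7}) root = insert(root, k);
  visit_in_order(root); // prints "1 3 5 7 9" regardless of priorities
  std::cout << '\n';
  return 0;
}
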
530 test/hotspot/gtest/nmt/test_vmatree.cpp Normal file
@ -0,0 +1,530 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "nmt/memflags.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/vmatree.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"

using Tree = VMATree;
using Node = Tree::TreapNode;
using NCS = NativeCallStackStorage;

class VMATreeTest : public testing::Test {
public:
  NCS ncs;
  constexpr static const int si_len = 2;
  NCS::StackIndex si[si_len];
  NativeCallStack stacks[si_len];

  VMATreeTest() : ncs(true) {
    stacks[0] = make_stack(0xA);
    stacks[1] = make_stack(0xB);
    si[0] = ncs.push(stacks[0]);
    si[1] = ncs.push(stacks[1]);
  }

  // Utilities

  VMATree::TreapNode* treap_root(VMATree& tree) {
    return tree._tree._root;
  }

  VMATree::VMATreap& treap(VMATree& tree) {
    return tree._tree;
  }

  VMATree::TreapNode* find(VMATree::VMATreap& treap, const VMATree::position key) {
    return treap.find(treap._root, key);
  }

  NativeCallStack make_stack(size_t a) {
    NativeCallStack stack((address*)&a, 1);
    return stack;
  }

  VMATree::StateType in_type_of(VMATree::TreapNode* x) {
    return x->val().in.type();
  }

  VMATree::StateType out_type_of(VMATree::TreapNode* x) {
    return x->val().out.type();
  }

  int count_nodes(Tree& tree) {
    int count = 0;
    treap(tree).visit_in_order([&](Node* x) {
      ++count;
    });
    return count;
  }

  // Tests
  // Adjacent reservations are merged if the properties match.
  void adjacent_2_nodes(const VMATree::RegionData& rd) {
    Tree tree;
    for (int i = 0; i < 10; i++) {
      tree.reserve_mapping(i * 100, 100, rd);
    }
    EXPECT_EQ(2, count_nodes(tree));

    // Reserving the exact same space again should result in still having only 2 nodes
    for (int i = 0; i < 10; i++) {
      tree.reserve_mapping(i * 100, 100, rd);
    }
    EXPECT_EQ(2, count_nodes(tree));

    // Do it backwards instead.
    Tree tree2;
    for (int i = 9; i >= 0; i--) {
      tree2.reserve_mapping(i * 100, 100, rd);
    }
    EXPECT_EQ(2, count_nodes(tree2));
  }

  // After removing all ranges we should be left with an entirely empty tree
  void remove_all_leaves_empty_tree(const VMATree::RegionData& rd) {
    Tree tree;
    tree.reserve_mapping(0, 100 * 10, rd);
    for (int i = 0; i < 10; i++) {
      tree.release_mapping(i * 100, 100);
    }
    EXPECT_EQ(nullptr, treap_root(tree));

    // Other way around
    tree.reserve_mapping(0, 100 * 10, rd);
    for (int i = 9; i >= 0; i--) {
      tree.release_mapping(i * 100, 100);
    }
    EXPECT_EQ(nullptr, treap_root(tree));
  }

  // Committing in a whole reserved range results in 2 nodes
  void commit_whole(const VMATree::RegionData& rd) {
    Tree tree;
    tree.reserve_mapping(0, 100 * 10, rd);
    for (int i = 0; i < 10; i++) {
      tree.commit_mapping(i * 100, 100, rd);
    }
    treap(tree).visit_in_order([&](Node* x) {
      VMATree::StateType in = in_type_of(x);
      VMATree::StateType out = out_type_of(x);
      EXPECT_TRUE((in == VMATree::StateType::Released && out == VMATree::StateType::Committed) ||
                  (in == VMATree::StateType::Committed && out == VMATree::StateType::Released));
    });
    EXPECT_EQ(2, count_nodes(tree));
  }

  // Committing in middle of reservation ends with a sequence of 4 nodes
  void commit_middle(const VMATree::RegionData& rd) {
    Tree tree;
    tree.reserve_mapping(0, 100, rd);
    tree.commit_mapping(50, 25, rd);

    size_t found[16];
    size_t wanted[4] = {0, 50, 75, 100};
    auto exists = [&](size_t x) {
      for (int i = 0; i < 4; i++) {
        if (wanted[i] == x) return true;
      }
      return false;
    };

    int i = 0;
    treap(tree).visit_in_order([&](Node* x) {
      if (i < 16) {
        found[i] = x->key();
      }
      i++;
    });

    ASSERT_EQ(4, i) << "0 - 50 - 75 - 100 nodes expected";
    EXPECT_TRUE(exists(found[0]));
    EXPECT_TRUE(exists(found[1]));
    EXPECT_TRUE(exists(found[2]));
    EXPECT_TRUE(exists(found[3]));
  }
};

TEST_VM_F(VMATreeTest, OverlappingReservationsResultInTwoNodes) {
  VMATree::RegionData rd{si[0], mtTest};
  Tree tree;
  for (int i = 99; i >= 0; i--) {
    tree.reserve_mapping(i * 100, 101, rd);
  }
  EXPECT_EQ(2, count_nodes(tree));
}

// Low-level tests inspecting the state of the tree.
TEST_VM_F(VMATreeTest, LowLevel) {
  adjacent_2_nodes(VMATree::empty_regiondata);
  remove_all_leaves_empty_tree(VMATree::empty_regiondata);
  commit_middle(VMATree::empty_regiondata);
  commit_whole(VMATree::empty_regiondata);

  VMATree::RegionData rd{si[0], mtTest};
  adjacent_2_nodes(rd);
  remove_all_leaves_empty_tree(rd);
  commit_middle(rd);
  commit_whole(rd);

  { // Identical operation but different metadata should not merge
    Tree tree;
    VMATree::RegionData rd{si[0], mtTest};
    VMATree::RegionData rd2{si[1], mtNMT};
    tree.reserve_mapping(0, 100, rd);
    tree.reserve_mapping(100, 100, rd2);

    EXPECT_EQ(3, count_nodes(tree));
  }

  { // Reserving after commit should overwrite commit
    Tree tree;
    VMATree::RegionData rd{si[0], mtTest};
    VMATree::RegionData rd2{si[1], mtNMT};
    tree.commit_mapping(50, 50, rd2);
    tree.reserve_mapping(0, 100, rd);
    treap(tree).visit_in_order([&](Node* x) {
      EXPECT_TRUE(x->key() == 0 || x->key() == 100);
      if (x->key() == 0) {
        EXPECT_EQ(x->val().out.regiondata().flag, mtTest);
      }
    });

    EXPECT_EQ(2, count_nodes(tree));
  }

  { // Split a reserved region into two different reserved regions
    Tree tree;
    VMATree::RegionData rd{si[0], mtTest};
    VMATree::RegionData rd2{si[1], mtNMT};
    VMATree::RegionData rd3{si[0], mtNone};
    tree.reserve_mapping(0, 100, rd);
    tree.reserve_mapping(0, 50, rd2);
    tree.reserve_mapping(50, 50, rd3);

    EXPECT_EQ(3, count_nodes(tree));
  }
  { // One big reserve + release leaves an empty tree
    Tree::RegionData rd{si[0], mtNMT};
    Tree tree;
    tree.reserve_mapping(0, 500000, rd);
    tree.release_mapping(0, 500000);

    EXPECT_EQ(nullptr, treap_root(tree));
  }
  { // A committed region inside of/replacing a reserved region
    // should replace the reserved region's metadata.
    Tree::RegionData rd{si[0], mtNMT};
    VMATree::RegionData rd2{si[1], mtTest};
    Tree tree;
    tree.reserve_mapping(0, 100, rd);
    tree.commit_mapping(0, 100, rd2);
    treap(tree).visit_range_in_order(0, 99999, [&](Node* x) {
      if (x->key() == 0) {
        EXPECT_EQ(mtTest, x->val().out.regiondata().flag);
      }
      if (x->key() == 100) {
        EXPECT_EQ(mtTest, x->val().in.regiondata().flag);
      }
    });
  }

  { // Attempting to reserve or commit an empty region should not change the tree.
    Tree tree;
    Tree::RegionData rd{si[0], mtNMT};
    tree.reserve_mapping(0, 0, rd);
    EXPECT_EQ(nullptr, treap_root(tree));
    tree.commit_mapping(0, 0, rd);
    EXPECT_EQ(nullptr, treap_root(tree));
  }
}

// Tests for summary accounting
TEST_VM_F(VMATreeTest, SummaryAccounting) {
  { // Fully enclosed re-reserving works correctly.
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    Tree::RegionData rd2(NCS::StackIndex(), mtNMT);
    Tree tree;
    VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(100, diff.reserve);
    all_diff = tree.reserve_mapping(50, 25, rd2);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    VMATree::SingleDiff diff2 = all_diff.flag[NMTUtil::flag_to_index(mtNMT)];
    EXPECT_EQ(-25, diff.reserve);
    EXPECT_EQ(25, diff2.reserve);
  }
  { // Fully release reserved mapping
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    Tree tree;
    VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(100, diff.reserve);
    all_diff = tree.release_mapping(0, 100);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(-100, diff.reserve);
  }
  { // Convert some of a released mapping to a committed one
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    Tree tree;
    VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(diff.reserve, 100);
    all_diff = tree.commit_mapping(0, 100, rd);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(0, diff.reserve);
    EXPECT_EQ(100, diff.commit);
  }
  { // Adjacent reserved mappings with same flag
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    Tree tree;
    VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(diff.reserve, 100);
    all_diff = tree.reserve_mapping(100, 100, rd);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(100, diff.reserve);
  }
  { // Adjacent reserved mappings with different flags
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    Tree::RegionData rd2(NCS::StackIndex(), mtNMT);
    Tree tree;
    VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(diff.reserve, 100);
    all_diff = tree.reserve_mapping(100, 100, rd2);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
    EXPECT_EQ(0, diff.reserve);
    diff = all_diff.flag[NMTUtil::flag_to_index(mtNMT)];
    EXPECT_EQ(100, diff.reserve);
  }

  { // A commit with two previous commits inside of it should only register
    // the new memory in the commit diff: committing [0, 1024) with 2 * 128 = 256
    // bytes already committed adds 1024 - 256 = 768 bytes of commit. Only those
    // 256 bytes were previously reserved (implicitly, by the earlier commits),
    // so the reserve diff is likewise 768.
    Tree tree;
    Tree::RegionData rd(NCS::StackIndex(), mtTest);
    tree.commit_mapping(128, 128, rd);
    tree.commit_mapping(512, 128, rd);
    VMATree::SummaryDiff diff = tree.commit_mapping(0, 1024, rd);
    EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].commit);
    EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].reserve);
  }
}

// Exceedingly simple tracker for page-granular allocations
// Use it for testing consistency with VMATree.
struct SimpleVMATracker : public CHeapObj<mtTest> {
  const size_t page_size = 4096;
  enum Type { Reserved, Committed, Free };
  struct Info {
    Type type;
    MEMFLAGS flag;
    NativeCallStack stack;
    Info() : type(Free), flag(mtNone), stack() {}

    Info(Type type, NativeCallStack stack, MEMFLAGS flag)
      : type(type), flag(flag), stack(stack) {}

    bool eq(Info other) {
      return flag == other.flag && stack.equals(other.stack);
    }
  };
  // Page (4KiB) granular array
  static constexpr const size_t num_pages = 1024 * 512;
  Info pages[num_pages];

  SimpleVMATracker()
    : pages() {
    for (size_t i = 0; i < num_pages; i++) {
      pages[i] = Info();
    }
  }

  VMATree::SummaryDiff do_it(Type type, size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
    assert(is_aligned(size, page_size) && is_aligned(start, page_size), "page alignment");

    VMATree::SummaryDiff diff;
    const size_t page_count = size / page_size;
    const size_t start_idx = start / page_size;
    const size_t end_idx = start_idx + page_count;
    assert(end_idx < SimpleVMATracker::num_pages, "");

    Info new_info(type, stack, flag);
    for (size_t i = start_idx; i < end_idx; i++) {
      Info& old_info = pages[i];

      // Register diff
      if (old_info.type == Reserved) {
        diff.flag[(int)old_info.flag].reserve -= page_size;
      } else if (old_info.type == Committed) {
        diff.flag[(int)old_info.flag].reserve -= page_size;
        diff.flag[(int)old_info.flag].commit -= page_size;
      }

      if (type == Reserved) {
        diff.flag[(int)new_info.flag].reserve += page_size;
      } else if (type == Committed) {
        diff.flag[(int)new_info.flag].reserve += page_size;
        diff.flag[(int)new_info.flag].commit += page_size;
      }
      // Overwrite old one with new
      pages[i] = new_info;
    }
    return diff;
  }

  VMATree::SummaryDiff reserve(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
    return do_it(Reserved, start, size, stack, flag);
  }

  VMATree::SummaryDiff commit(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
    return do_it(Committed, start, size, stack, flag);
  }

  VMATree::SummaryDiff release(size_t start, size_t size) {
    return do_it(Free, start, size, NativeCallStack(), mtNone);
  }
};

constexpr const size_t SimpleVMATracker::num_pages;

TEST_VM_F(VMATreeTest, TestConsistencyWithSimpleTracker) {
  // In this test we use ASSERT macros from gtest instead of EXPECT
  // as any error will propagate and become larger as the test progresses.
  SimpleVMATracker* tr = new SimpleVMATracker();
  const size_t page_size = tr->page_size;
  VMATree tree;
  NCS ncss(true);
  constexpr const int candidates_len_flags = 4;
  constexpr const int candidates_len_stacks = 2;

  NativeCallStack candidate_stacks[candidates_len_stacks] = {
    make_stack(0xA),
    make_stack(0xB),
  };

  const MEMFLAGS candidate_flags[candidates_len_flags] = {
    mtNMT,
    mtTest,
    mtGC,
    mtCompiler
  };

  const int operation_count = 100000; // One hundred thousand
  for (int i = 0; i < operation_count; i++) {
    size_t page_start = (size_t)(os::random() % SimpleVMATracker::num_pages);
    size_t page_end = (size_t)(os::random() % (SimpleVMATracker::num_pages));

    if (page_end < page_start) {
      const size_t temp = page_start;
      page_start = page_end;
      page_end = temp;
    }
    const size_t num_pages = page_end - page_start;

    if (num_pages == 0) {
      i--; continue;
    }

    const size_t start = page_start * page_size;
    const size_t size = num_pages * page_size;

    const MEMFLAGS flag = candidate_flags[os::random() % candidates_len_flags];
    const NativeCallStack stack = candidate_stacks[os::random() % candidates_len_stacks];

    const NCS::StackIndex si = ncss.push(stack);
    VMATree::RegionData data(si, flag);

    const SimpleVMATracker::Type type = (SimpleVMATracker::Type)(os::random() % 3);

    VMATree::SummaryDiff tree_diff;
    VMATree::SummaryDiff simple_diff;
    if (type == SimpleVMATracker::Reserved) {
      simple_diff = tr->reserve(start, size, stack, flag);
      tree_diff = tree.reserve_mapping(start, size, data);
    } else if (type == SimpleVMATracker::Committed) {
      simple_diff = tr->commit(start, size, stack, flag);
      tree_diff = tree.commit_mapping(start, size, data);
    } else {
      simple_diff = tr->release(start, size);
      tree_diff = tree.release_mapping(start, size);
    }

    for (int j = 0; j < mt_number_of_types; j++) {
      VMATree::SingleDiff td = tree_diff.flag[j];
      VMATree::SingleDiff sd = simple_diff.flag[j];
      ASSERT_EQ(td.reserve, sd.reserve);
      ASSERT_EQ(td.commit, sd.commit);
    }

    // Do an in-depth check every 25,000 iterations.
    if (i % 25000 == 0) {
      size_t j = 0;
      while (j < SimpleVMATracker::num_pages) {
        // Skip over free pages to find the start of the next run.
        while (j < SimpleVMATracker::num_pages &&
               tr->pages[j].type == SimpleVMATracker::Free) {
          j++;
        }

        if (j == SimpleVMATracker::num_pages) {
          break;
        }

        size_t start = j;
        SimpleVMATracker::Info starti = tr->pages[start];

        // Extend the run while the pages carry identical metadata.
        while (j < SimpleVMATracker::num_pages &&
               tr->pages[j].eq(starti)) {
          j++;
        }

        size_t end = j - 1;
        ASSERT_LE(end, SimpleVMATracker::num_pages);
        SimpleVMATracker::Info endi = tr->pages[end];

        // The tree must have boundary nodes at both ends of the run,
        // carrying the same stack and flag as the reference tracker.
        VMATree::VMATreap& treap = this->treap(tree);
        VMATree::TreapNode* startn = find(treap, start * page_size);
        ASSERT_NE(nullptr, startn);
        VMATree::TreapNode* endn = find(treap, (end * page_size) + page_size);
        ASSERT_NE(nullptr, endn);

        const NativeCallStack& start_stack = ncss.get(startn->val().out.stack());
        const NativeCallStack& end_stack = ncss.get(endn->val().in.stack());
        ASSERT_TRUE(starti.stack.equals(start_stack));
        ASSERT_TRUE(endi.stack.equals(end_stack));

        ASSERT_EQ(starti.flag, startn->val().out.flag());
        ASSERT_EQ(endi.flag, endn->val().in.flag());
      }
    }
  }
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -86,20 +86,12 @@ public class TestZNMT {
        if (zForceDiscontiguousHeapReservations > 1) {
            oa.shouldContain("Address Space Type: Discontiguous");
        }

        if (XmsInM < XmxInM) {
            // There will be reservations which are smaller than the total
            // memory allocated in TestZNMT.Test.main. This means that some
            // reservations will be completely committed and print the
            // following in the NMT statistics.
            oa.shouldMatch("reserved and committed \\d+ for Java Heap");
        }
        // We expect to have a report of this type.
        oa.shouldMatch("ZGC heap backing file");
        oa.shouldMatch("allocated \\d+ for Java Heap");
    }

    public static void main(String[] args) throws Exception {
        testValue(0);
        testValue(1);
        testValue(2);
        testValue(100);
    }
}