8337563: NMT: rename MEMFLAGS to MemTag

Reviewed-by: dholmes, coleenp, jsjolen
Gerard Ziemski 2024-09-17 19:59:06 +00:00
parent d5881825ef
commit eabfc6e4d9
125 changed files with 1285 additions and 1279 deletions


@ -4602,7 +4602,7 @@ static void workaround_expand_exec_shield_cs_limit() {
return; // No matter, we tried, best effort.
}
MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
MemTracker::record_virtual_memory_tag((address)codebuf, mtInternal);
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);


@ -367,7 +367,7 @@ bool os::dir_is_empty(const char* path) {
return result;
}
static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MEMFLAGS flag) {
static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MemTag mem_tag) {
char * addr;
int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
if (requested_addr != nullptr) {
@ -382,7 +382,7 @@ static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MEMFLAGS
flags, -1, 0);
if (addr != MAP_FAILED) {
MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC, flag);
MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC, mem_tag);
return addr;
}
return nullptr;
@ -495,7 +495,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
return chop_extra_memory(size, alignment, extra_base, extra_size);
}
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag) {
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag) {
size_t extra_size = calculate_aligned_extra_size(size, alignment);
// For file mapping, we do not call os:map_memory_to_file(size,fd) since:
// - we later chop away parts of the mapping using os::release_memory and that could fail if the
@ -503,7 +503,7 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_des
// - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
// mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
// chopping off and unmapping excess bits back and front (see below) would not work.
char* extra_base = reserve_mmapped_memory(extra_size, nullptr, flag);
char* extra_base = reserve_mmapped_memory(extra_size, nullptr, mem_tag);
if (extra_base == nullptr) {
return nullptr;
}


@ -3428,7 +3428,7 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag = mtNone) {
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag = mtNone) {
assert(is_aligned(alignment, os::vm_allocation_granularity()),
"Alignment must be a multiple of allocation granularity (page size)");
assert(is_aligned(size, os::vm_allocation_granularity()),
@ -3441,8 +3441,8 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
static const int max_attempts = 20;
for (int attempt = 0; attempt < max_attempts && aligned_base == nullptr; attempt ++) {
char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, flag) :
os::reserve_memory(extra_size, false, flag);
char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, mem_tag) :
os::reserve_memory(extra_size, false, mem_tag);
if (extra_base == nullptr) {
return nullptr;
}
@ -3458,8 +3458,8 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
// Which may fail, hence the loop.
aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, flag) :
os::attempt_reserve_memory_at(aligned_base, size, false, flag);
aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, mem_tag) :
os::attempt_reserve_memory_at(aligned_base, size, false, mem_tag);
}
assert(aligned_base != nullptr,
@ -3473,8 +3473,8 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
}
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag) {
return map_or_reserve_memory_aligned(size, alignment, fd, flag);
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag) {
return map_or_reserve_memory_aligned(size, alignment, fd, mem_tag);
}
char* os::pd_reserve_memory(size_t bytes, bool exec) {
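The hunks above only rename the tag parameter's type; call sites keep the same shape. A minimal sketch of how HotSpot code might request a tagged reservation after this change (HotSpot-internal code; the helper name and header paths are illustrative, not part of the commit):

#include "nmt/memTag.hpp"
#include "runtime/os.hpp"

// Illustrative helper: reserve anonymous memory and attribute it to the GC
// category via the renamed MemTag parameter.
static char* reserve_gc_scratch(size_t bytes) {
  return os::reserve_memory(bytes, /* executable */ false, mtGC); // nullptr on failure
}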


@ -1716,10 +1716,10 @@ void FileMapInfo::close() {
*/
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec, MEMFLAGS flags = mtNone) {
bool allow_exec, MemTag mem_tag = mtNone) {
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
AlwaysPreTouch ? false : read_only,
allow_exec, flags);
allow_exec, mem_tag);
if (mem != nullptr && AlwaysPreTouch) {
os::pretouch_memory(mem, mem + bytes);
}
@ -2178,7 +2178,7 @@ bool FileMapInfo::map_heap_region_impl() {
_mapped_heap_memregion = MemRegion(start, word_size);
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_type()
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
// for mapped region as it is part of the reserved java heap, which is already recorded.
char* addr = (char*)_mapped_heap_memregion.start();
char* base;


@ -1299,7 +1299,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
// Register archive space with NMT.
MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
MemTracker::record_virtual_memory_tag(archive_space_rs.base(), mtClassShared);
return archive_space_rs.base();
}
return nullptr;
@ -1361,8 +1361,8 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
return nullptr;
}
// NMT: fix up the space tags
MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
MemTracker::record_virtual_memory_tag(archive_space_rs.base(), mtClassShared);
MemTracker::record_virtual_memory_tag(class_space_rs.base(), mtClass);
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, base_address_alignment,


@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
template <typename E, MEMFLAGS F>
template <typename E, MemTag MT>
class GrowableArrayCHeap;
// G1AbstractSubTask represents a task to be performed either within a


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,22 +29,22 @@
#include "runtime/vmOperations.hpp"
#include "utilities/globalCounter.inline.hpp"
G1MonotonicArena::Segment::Segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS flag) :
G1MonotonicArena::Segment::Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag) :
_slot_size(slot_size),
_num_slots(num_slots),
_next(next),
_next_allocate(0),
_mem_flag(flag) {
_mem_tag(mem_tag) {
_bottom = ((char*) this) + header_size();
}
G1MonotonicArena::Segment* G1MonotonicArena::Segment::create_segment(uint slot_size,
uint num_slots,
Segment* next,
MEMFLAGS mem_flag) {
MemTag mem_tag) {
size_t block_size = size_in_bytes(slot_size, num_slots);
char* alloc_block = NEW_C_HEAP_ARRAY(char, block_size, mem_flag);
return new (alloc_block) Segment(slot_size, num_slots, next, mem_flag);
char* alloc_block = NEW_C_HEAP_ARRAY(char, block_size, mem_tag);
return new (alloc_block) Segment(slot_size, num_slots, next, mem_tag);
}
void G1MonotonicArena::Segment::delete_segment(Segment* segment) {
@ -54,7 +54,7 @@ void G1MonotonicArena::Segment::delete_segment(Segment* segment) {
GlobalCounter::write_synchronize();
}
segment->~Segment();
FREE_C_HEAP_ARRAY(_mem_flag, segment);
FREE_C_HEAP_ARRAY(_mem_tag, segment);
}
void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
@ -108,7 +108,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
uint prev_num_slots = (prev != nullptr) ? prev->num_slots() : 0;
uint num_slots = _alloc_options->next_num_slots(prev_num_slots);
next = Segment::create_segment(slot_size(), num_slots, prev, _alloc_options->mem_flag());
next = Segment::create_segment(slot_size(), num_slots, prev, _alloc_options->mem_tag());
} else {
assert(slot_size() == next->slot_size() ,
"Mismatch %d != %d", slot_size(), next->slot_size());


@ -27,7 +27,7 @@
#define SHARE_GC_G1_G1MONOTONICARENA_HPP
#include "gc/shared/freeListAllocator.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
@ -120,7 +120,7 @@ class G1MonotonicArena::Segment {
// to _num_slots (can be larger because we atomically increment this value and
// check only afterwards if the allocation has been successful).
uint volatile _next_allocate;
const MEMFLAGS _mem_flag;
const MemTag _mem_tag;
char* _bottom; // Actual data.
// Do not add class member variables beyond this point
@ -136,7 +136,7 @@ class G1MonotonicArena::Segment {
NONCOPYABLE(Segment);
Segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS flag);
Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
~Segment() = default;
public:
Segment* volatile* next_addr() { return &_next; }
@ -173,7 +173,7 @@ public:
return header_size() + payload_size(slot_size, num_slots);
}
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS mem_flag);
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
static void delete_segment(Segment* segment);
// Copies the contents of this segment into the destination.
@ -222,7 +222,7 @@ public:
class G1MonotonicArena::AllocOptions {
protected:
const MEMFLAGS _mem_flag;
const MemTag _mem_tag;
const uint _slot_size;
const uint _initial_num_slots;
// Defines a limit to the number of slots in the segment
@ -230,8 +230,8 @@ protected:
const uint _slot_alignment;
public:
AllocOptions(MEMFLAGS mem_flag, uint slot_size, uint initial_num_slots, uint max_num_slots, uint alignment) :
_mem_flag(mem_flag),
AllocOptions(MemTag mem_tag, uint slot_size, uint initial_num_slots, uint max_num_slots, uint alignment) :
_mem_tag(mem_tag),
_slot_size(align_up(slot_size, alignment)),
_initial_num_slots(initial_num_slots),
_max_num_slots(max_num_slots),
@ -250,7 +250,7 @@ public:
uint slot_alignment() const { return _slot_alignment; }
MEMFLAGS mem_flag() const {return _mem_flag; }
MemTag mem_tag() const {return _mem_tag; }
};
#endif //SHARE_GC_G1_MONOTONICARENA_HPP
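For the renamed G1MonotonicArena::AllocOptions, construction and the accessor now carry a MemTag. A hypothetical usage sketch (the surrounding function is invented and assumes the nested class is reachable from the caller):

#include "gc/g1/g1MonotonicArena.hpp"

// Hypothetical caller: the first constructor argument and the accessor are MemTag-typed.
static MemTag example_alloc_options_tag() {
  G1MonotonicArena::AllocOptions opts(mtGC,
                                      /* slot_size */ 16,
                                      /* initial_num_slots */ 8,
                                      /* max_num_slots */ 64,
                                      /* alignment */ 8);
  return opts.mem_tag();   // mtGC, previously exposed as mem_flag()
}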


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,15 +40,15 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MEMFLAGS type) :
MemTag mem_tag) :
_listener(nullptr),
_storage(rs, used_size, page_size),
_region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
_memory_type(type) {
_memory_tag(mem_tag) {
guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
MemTracker::record_virtual_memory_type((address)rs.base(), type);
MemTracker::record_virtual_memory_tag((address)rs.base(), mem_tag);
}
// Used to manually signal a mapper to handle a set of regions as committed.
@ -72,8 +72,8 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MEMFLAGS type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
MemTag mem_tag) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, mem_tag),
_pages_per_region(alloc_granularity / (page_size * commit_factor)) {
guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
@ -97,7 +97,7 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
const size_t start_page = (size_t)start_idx * _pages_per_region;
const size_t size_in_pages = num_regions * _pages_per_region;
bool zero_filled = _storage.commit(start_page, size_in_pages);
if (_memory_type == mtJavaHeap) {
if (_memory_tag == mtJavaHeap) {
for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
void* address = _storage.page_start(region_index * _pages_per_region);
size_t size_in_bytes = _storage.page_size() * _pages_per_region;
@ -150,7 +150,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
}
void numa_request_on_node(size_t page_idx) {
if (_memory_type == mtJavaHeap) {
if (_memory_tag == mtJavaHeap) {
uint region = (uint)(page_idx * _regions_per_page);
void* address = _storage.page_start(page_idx);
size_t size_in_bytes = _storage.page_size();
@ -164,8 +164,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MEMFLAGS type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
MemTag mem_tag) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, mem_tag),
_regions_per_page((page_size * commit_factor) / alloc_granularity),
_lock(Mutex::service-3, "G1Mapper_lock") {
@ -263,10 +263,10 @@ G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MEMFLAGS type) {
MemTag mem_tag) {
if (region_granularity >= (page_size * commit_factor)) {
return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, mem_tag);
} else {
return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, mem_tag);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,9 +52,9 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
// Mapping management
CHeapBitMap _region_commit_map;
MEMFLAGS _memory_type;
MemTag _memory_tag;
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MEMFLAGS type);
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemTag mem_tag);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
public:
@ -85,7 +85,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
size_t page_size,
size_t region_granularity,
size_t byte_translation_factor,
MEMFLAGS type);
MemTag mem_tag);
};
#endif // SHARE_GC_G1_G1REGIONTOSPACEMAPPER_HPP


@ -51,7 +51,7 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}
MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);
MemTracker::record_virtual_memory_tag(backing_store.base(), mtGC);
// We do not commit any memory initially
_virtual_space.initialize(backing_store);


@ -51,7 +51,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes,
rs.base(), rs.size(), used_page_sz);
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
_virtual_space = new PSVirtualSpace(rs, page_sz);
if (_virtual_space != nullptr && _virtual_space->expand_by(_reserved_byte_size)) {


@ -235,7 +235,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
rs.size(), page_sz);
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
if (vspace != nullptr) {


@ -42,7 +42,7 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");


@ -84,7 +84,7 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
MAX2(_page_size, os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
MemTracker::record_virtual_memory_tag((address)heap_rs.base(), mtGC);
os::trace_page_sizes("Card Table", num_bytes, num_bytes,
heap_rs.base(), heap_rs.size(), _page_size);
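The same one-line pattern recurs across the GC call sites above: reserve the space, then hand its base address and a MemTag to NMT. A hedged sketch of that pattern after the rename (HotSpot-internal; the wrapper function name is illustrative and the ReservedSpace header path is assumed):

#include "memory/virtualspace.hpp"   // ReservedSpace (assumed location)
#include "nmt/memTracker.hpp"

// Illustrative only: tag an existing reservation so NMT reports it under mtGC.
static void tag_reservation_as_gc(const ReservedSpace& rs) {
  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
}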


@ -127,10 +127,10 @@ OopStorage::ActiveArray::~ActiveArray() {
}
OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,
MEMFLAGS memflags,
MemTag mem_tag,
AllocFailType alloc_fail) {
size_t size_in_bytes = blocks_offset() + sizeof(Block*) * size;
void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, memflags, CURRENT_PC, alloc_fail);
void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, mem_tag, CURRENT_PC, alloc_fail);
if (mem == nullptr) return nullptr;
return new (mem) ActiveArray(size);
}
@ -343,7 +343,7 @@ OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
// _data must be first member: aligning block => aligning _data.
STATIC_ASSERT(_data_pos == 0);
size_t size_needed = allocation_size();
void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, owner->memflags());
void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, owner->mem_tag());
if (memory == nullptr) {
return nullptr;
}
@ -575,7 +575,7 @@ bool OopStorage::expand_active_array() {
log_debug(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
name(), new_size);
ActiveArray* new_array = ActiveArray::create(new_size,
memflags(),
mem_tag(),
AllocFailStrategy::RETURN_NULL);
if (new_array == nullptr) return false;
new_array->copy_from(old_array);
@ -805,8 +805,8 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
}
}
OopStorage* OopStorage::create(const char* name, MEMFLAGS memflags) {
return new (memflags) OopStorage(name, memflags);
OopStorage* OopStorage::create(const char* name, MemTag mem_tag) {
return new (mem_tag) OopStorage(name, mem_tag);
}
const size_t initial_active_array_size = 8;
@ -819,9 +819,9 @@ static Mutex* make_oopstorage_mutex(const char* storage_name,
return new PaddedMutex(rank, name);
}
OopStorage::OopStorage(const char* name, MEMFLAGS memflags) :
OopStorage::OopStorage(const char* name, MemTag mem_tag) :
_name(os::strdup(name)),
_active_array(ActiveArray::create(initial_active_array_size, memflags)),
_active_array(ActiveArray::create(initial_active_array_size, mem_tag)),
_allocation_list(),
_deferred_updates(nullptr),
_allocation_mutex(make_oopstorage_mutex(name, "alloc", Mutex::oopstorage)),
@ -829,7 +829,7 @@ OopStorage::OopStorage(const char* name, MEMFLAGS memflags) :
_num_dead_callback(nullptr),
_allocation_count(0),
_concurrent_iteration_count(0),
_memflags(memflags),
_mem_tag(mem_tag),
_needs_cleanup(false)
{
_active_array->increment_refcount();
@ -1030,7 +1030,7 @@ size_t OopStorage::total_memory_usage() const {
return total_size;
}
MEMFLAGS OopStorage::memflags() const { return _memflags; }
MemTag OopStorage::mem_tag() const { return _mem_tag; }
// Parallel iteration support
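With the accessor renamed from memflags() to mem_tag(), creating and querying a storage looks like the following sketch (HotSpot-internal; the storage name and wrapper function are illustrative):

#include "gc/shared/oopStorage.hpp"

// Illustrative factory wrapper: the create() parameter and the accessor are now MemTag.
static OopStorage* make_example_storage() {
  OopStorage* storage = OopStorage::create("Example Oops", mtGC);
  assert(storage->mem_tag() == mtGC, "tag should round-trip");
  return storage;
}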


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,7 @@ class outputStream;
class OopStorage : public CHeapObjBase {
public:
static OopStorage* create(const char* name, MEMFLAGS memflags);
static OopStorage* create(const char* name, MemTag mem_tag);
~OopStorage();
// These count and usage accessors are racy unless at a safepoint.
@ -89,8 +89,8 @@ public:
// bookkeeping overhead, including this storage object.
size_t total_memory_usage() const;
// The memory type for allocations.
MEMFLAGS memflags() const;
// The memory tag for allocations.
MemTag mem_tag() const;
enum EntryStatus {
INVALID_ENTRY,
@ -273,14 +273,14 @@ private:
// mutable because this gets set even for const iteration.
mutable int _concurrent_iteration_count;
// The memory type for allocations.
MEMFLAGS _memflags;
// The memory tag for allocations.
MemTag _mem_tag;
// Flag indicating this storage object is a candidate for empty block deletion.
volatile bool _needs_cleanup;
// Clients construct via "create" factory function.
OopStorage(const char* name, MEMFLAGS memflags);
OopStorage(const char* name, MemTag mem_tag);
NONCOPYABLE(OopStorage);
bool try_add_block();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ class OopStorage::ActiveArray {
public:
static ActiveArray* create(size_t size,
MEMFLAGS memflags = mtGC,
MemTag mem_tag = mtGC,
AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
static void destroy(ActiveArray* ba);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,18 +31,18 @@
OopStorage* OopStorageSet::_storages[all_count] = {};
OopStorage* OopStorageSet::create_strong(const char* name, MEMFLAGS memflags) {
OopStorage* OopStorageSet::create_strong(const char* name, MemTag mem_tag) {
static uint registered_strong = 0;
assert(registered_strong < strong_count, "More registered strong storages than slots");
OopStorage* storage = OopStorage::create(name, memflags);
OopStorage* storage = OopStorage::create(name, mem_tag);
_storages[strong_start + registered_strong++] = storage;
return storage;
}
OopStorage* OopStorageSet::create_weak(const char* name, MEMFLAGS memflags) {
OopStorage* OopStorageSet::create_weak(const char* name, MemTag mem_tag) {
static uint registered_weak = 0;
assert(registered_weak < weak_count, "More registered strong storages than slots");
OopStorage* storage = OopStorage::create(name, memflags);
OopStorage* storage = OopStorage::create(name, mem_tag);
_storages[weak_start + registered_weak++] = storage;
return storage;
}


@ -25,7 +25,7 @@
#ifndef SHARE_GC_SHARED_OOPSTORAGESET_HPP
#define SHARE_GC_SHARED_OOPSTORAGESET_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/enumIterator.hpp"
#include "utilities/globalDefinitions.hpp"
@ -79,8 +79,8 @@ public:
static OopStorage* storage(WeakId id) { return get_storage(id); }
static OopStorage* storage(Id id) { return get_storage(id); }
static OopStorage* create_strong(const char* name, MEMFLAGS memflags);
static OopStorage* create_weak(const char* name, MEMFLAGS memflags);
static OopStorage* create_strong(const char* name, MemTag mem_tag);
static OopStorage* create_weak(const char* name, MemTag mem_tag);
// Support iteration over the storage objects.
template<typename StorageId> class Range;


@ -26,7 +26,7 @@
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"


@ -35,7 +35,7 @@
#include "gc/shared/stringdedup/stringDedupTable.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "oops/access.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/cpuTimeCounters.hpp"


@ -116,8 +116,8 @@ void TaskQueueStats::reset() {
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
template <unsigned int N, MemTag MT>
class TaskQueueSuper: public CHeapObj<MT> {
protected:
// Internal type for indexing the queue; also used for the tag.
typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;
@ -324,39 +324,39 @@ public:
// practice of parallel programming (PPoPP 2013), 69-80
//
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
template <class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, MT> {
protected:
typedef typename TaskQueueSuper<N, F>::Age Age;
typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
typedef typename TaskQueueSuper<N, MT>::Age Age;
typedef typename TaskQueueSuper<N, MT>::idx_t idx_t;
using TaskQueueSuper<N, F>::MOD_N_MASK;
using TaskQueueSuper<N, MT>::MOD_N_MASK;
using TaskQueueSuper<N, F>::bottom_relaxed;
using TaskQueueSuper<N, F>::bottom_acquire;
using TaskQueueSuper<N, MT>::bottom_relaxed;
using TaskQueueSuper<N, MT>::bottom_acquire;
using TaskQueueSuper<N, F>::set_bottom_relaxed;
using TaskQueueSuper<N, F>::release_set_bottom;
using TaskQueueSuper<N, MT>::set_bottom_relaxed;
using TaskQueueSuper<N, MT>::release_set_bottom;
using TaskQueueSuper<N, F>::age_relaxed;
using TaskQueueSuper<N, F>::set_age_relaxed;
using TaskQueueSuper<N, F>::cmpxchg_age;
using TaskQueueSuper<N, F>::age_top_relaxed;
using TaskQueueSuper<N, MT>::age_relaxed;
using TaskQueueSuper<N, MT>::set_age_relaxed;
using TaskQueueSuper<N, MT>::cmpxchg_age;
using TaskQueueSuper<N, MT>::age_top_relaxed;
using TaskQueueSuper<N, F>::increment_index;
using TaskQueueSuper<N, F>::decrement_index;
using TaskQueueSuper<N, F>::dirty_size;
using TaskQueueSuper<N, F>::clean_size;
using TaskQueueSuper<N, F>::assert_not_underflow;
using TaskQueueSuper<N, MT>::increment_index;
using TaskQueueSuper<N, MT>::decrement_index;
using TaskQueueSuper<N, MT>::dirty_size;
using TaskQueueSuper<N, MT>::clean_size;
using TaskQueueSuper<N, MT>::assert_not_underflow;
public:
typedef typename TaskQueueSuper<N, F>::PopResult PopResult;
typedef typename TaskQueueSuper<N, MT>::PopResult PopResult;
using TaskQueueSuper<N, F>::max_elems;
using TaskQueueSuper<N, F>::size;
using TaskQueueSuper<N, MT>::max_elems;
using TaskQueueSuper<N, MT>::size;
#if TASKQUEUE_STATS
using TaskQueueSuper<N, F>::stats;
using TaskQueueSuper<N, MT>::stats;
#endif
private:
@ -428,12 +428,12 @@ public:
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack. This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
template<class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, MT, N>
{
public:
typedef Stack<E, F> overflow_t;
typedef GenericTaskQueue<E, F, N> taskqueue_t;
typedef Stack<E, MT> overflow_t;
typedef GenericTaskQueue<E, MT, N> taskqueue_t;
TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
@ -467,11 +467,11 @@ public:
virtual uint tasks() const = 0;
};
template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
template <MemTag MT> class TaskQueueSetSuperImpl: public CHeapObj<MT>, public TaskQueueSetSuper {
};
template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
template<class T, MemTag MT>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<MT> {
public:
typedef typename T::element_type E;
typedef typename T::PopResult PopResult;
@ -518,29 +518,29 @@ public:
#endif // TASKQUEUE_STATS
};
template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
template<class T, MemTag MT> void
GenericTaskQueueSet<T, MT>::register_queue(uint i, T* q) {
assert(i < _n, "index out of range.");
_queues[i] = q;
}
template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
template<class T, MemTag MT> T*
GenericTaskQueueSet<T, MT>::queue(uint i) {
assert(i < _n, "index out of range.");
return _queues[i];
}
#ifdef ASSERT
template<class T, MEMFLAGS F>
void GenericTaskQueueSet<T, F>::assert_empty() const {
template<class T, MemTag MT>
void GenericTaskQueueSet<T, MT>::assert_empty() const {
for (uint j = 0; j < _n; j++) {
_queues[j]->assert_empty();
}
}
#endif // ASSERT
template<class T, MEMFLAGS F>
uint GenericTaskQueueSet<T, F>::tasks() const {
template<class T, MemTag MT>
uint GenericTaskQueueSet<T, MT>::tasks() const {
uint n = 0;
for (uint j = 0; j < _n; j++) {
n += _queues[j]->size();
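The task-queue templates keep their shape; only the second template parameter's type changes from MEMFLAGS to MemTag. A sketch of typical instantiations after these hunks (the typedef names are illustrative):

#include "gc/shared/taskqueue.hpp"
#include "oops/oopsHierarchy.hpp"

// Illustrative typedefs: the mt* constant supplied as the MemTag template argument
// decides how the queues' C-heap backing arrays are attributed in NMT.
typedef OverflowTaskQueue<oop, mtGC>                ExampleScanQueue;
typedef GenericTaskQueueSet<ExampleScanQueue, mtGC> ExampleScanQueueSet;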


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,30 +38,30 @@
#include "utilities/ostream.hpp"
#include "utilities/stack.inline.hpp"
template <class T, MEMFLAGS F>
inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(uint n) : _n(n) {
template <class T, MemTag MT>
inline GenericTaskQueueSet<T, MT>::GenericTaskQueueSet(uint n) : _n(n) {
typedef T* GenericTaskQueuePtr;
_queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
_queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, MT);
for (uint i = 0; i < n; i++) {
_queues[i] = nullptr;
}
}
template <class T, MEMFLAGS F>
inline GenericTaskQueueSet<T, F>::~GenericTaskQueueSet() {
template <class T, MemTag MT>
inline GenericTaskQueueSet<T, MT>::~GenericTaskQueueSet() {
FREE_C_HEAP_ARRAY(T*, _queues);
}
#if TASKQUEUE_STATS
template<class T, MEMFLAGS F>
void GenericTaskQueueSet<T, F>::print_taskqueue_stats_hdr(outputStream* const st, const char* label) {
template<class T, MemTag MT>
void GenericTaskQueueSet<T, MT>::print_taskqueue_stats_hdr(outputStream* const st, const char* label) {
st->print_cr("GC Task Stats %s", label);
st->print("thr "); TaskQueueStats::print_header(1, st); st->cr();
st->print("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
template<class T, MEMFLAGS F>
void GenericTaskQueueSet<T, F>::print_taskqueue_stats(outputStream* const st, const char* label) {
template<class T, MemTag MT>
void GenericTaskQueueSet<T, MT>::print_taskqueue_stats(outputStream* const st, const char* label) {
print_taskqueue_stats_hdr(st, label);
TaskQueueStats totals;
@ -75,16 +75,16 @@ void GenericTaskQueueSet<T, F>::print_taskqueue_stats(outputStream* const st, co
DEBUG_ONLY(totals.verify());
}
template<class T, MEMFLAGS F>
void GenericTaskQueueSet<T, F>::reset_taskqueue_stats() {
template<class T, MemTag MT>
void GenericTaskQueueSet<T, MT>::reset_taskqueue_stats() {
const uint n = size();
for (uint i = 0; i < n; ++i) {
queue(i)->stats.reset();
}
}
template <class T, MEMFLAGS F>
inline void GenericTaskQueueSet<T, F>::print_and_reset_taskqueue_stats(const char* label) {
template <class T, MemTag MT>
inline void GenericTaskQueueSet<T, MT>::print_and_reset_taskqueue_stats(const char* label) {
if (!log_is_enabled(Trace, gc, task, stats)) {
return;
}
@ -97,19 +97,19 @@ inline void GenericTaskQueueSet<T, F>::print_and_reset_taskqueue_stats(const cha
}
#endif // TASKQUEUE_STATS
template<class E, MEMFLAGS F, unsigned int N>
inline GenericTaskQueue<E, F, N>::GenericTaskQueue() :
_elems(MallocArrayAllocator<E>::allocate(N, F)),
template<class E, MemTag MT, unsigned int N>
inline GenericTaskQueue<E, MT, N>::GenericTaskQueue() :
_elems(MallocArrayAllocator<E>::allocate(N, MT)),
_last_stolen_queue_id(InvalidQueueId),
_seed(17 /* random number */) {}
template<class E, MEMFLAGS F, unsigned int N>
inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
template<class E, MemTag MT, unsigned int N>
inline GenericTaskQueue<E, MT, N>::~GenericTaskQueue() {
MallocArrayAllocator<E>::free(_elems);
}
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
template<class E, MemTag MT, unsigned int N> inline bool
GenericTaskQueue<E, MT, N>::push(E t) {
uint localBot = bottom_relaxed();
assert(localBot < N, "_bottom out of range.");
idx_t top = age_top_relaxed();
@ -134,8 +134,8 @@ GenericTaskQueue<E, F, N>::push(E t) {
return false; // Queue is full.
}
template <class E, MEMFLAGS F, unsigned int N>
inline bool OverflowTaskQueue<E, F, N>::push(E t) {
template <class E, MemTag MT, unsigned int N>
inline bool OverflowTaskQueue<E, MT, N>::push(E t) {
if (!taskqueue_t::push(t)) {
overflow_stack()->push(t);
TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
@ -143,8 +143,8 @@ inline bool OverflowTaskQueue<E, F, N>::push(E t) {
return true;
}
template <class E, MEMFLAGS F, unsigned int N>
inline bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
template <class E, MemTag MT, unsigned int N>
inline bool OverflowTaskQueue<E, MT, N>::try_push_to_taskqueue(E t) {
return taskqueue_t::push(t);
}
@ -154,8 +154,8 @@ inline bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
template<class E, MemTag MT, unsigned int N>
bool GenericTaskQueue<E, MT, N>::pop_local_slow(uint localBot, Age oldAge) {
// This queue was observed to contain exactly one element; either this
// thread will claim it, or a competing "pop_global". In either case,
// the queue will be logically empty afterwards. Create a new Age value
@ -187,8 +187,8 @@ bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
return false;
}
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
template<class E, MemTag MT, unsigned int N> inline bool
GenericTaskQueue<E, MT, N>::pop_local(E& t, uint threshold) {
uint localBot = bottom_relaxed();
// This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method
@ -224,8 +224,8 @@ GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
}
}
template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
template <class E, MemTag MT, unsigned int N>
bool OverflowTaskQueue<E, MT, N>::pop_overflow(E& t)
{
if (overflow_empty()) return false;
t = overflow_stack()->pop();
@ -253,8 +253,8 @@ bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
// (3) Owner starts a push, writing elems[bottom]. At the same time, Thief
// reads elems[oldAge.top]. The owner's bottom == the thief's oldAge.top.
// (4) Thief will discard the read value, because its cmpxchg of age will fail.
template<class E, MEMFLAGS F, unsigned int N>
typename GenericTaskQueue<E, F, N>::PopResult GenericTaskQueue<E, F, N>::pop_global(E& t) {
template<class E, MemTag MT, unsigned int N>
typename GenericTaskQueue<E, MT, N>::PopResult GenericTaskQueue<E, MT, N>::pop_global(E& t) {
Age oldAge = age_relaxed();
// Architectures with non-multi-copy-atomic memory model require a
@ -311,13 +311,13 @@ inline int randomParkAndMiller(int *seed0) {
return seed;
}
template<class E, MEMFLAGS F, unsigned int N>
int GenericTaskQueue<E, F, N>::next_random_queue_id() {
template<class E, MemTag MT, unsigned int N>
int GenericTaskQueue<E, MT, N>::next_random_queue_id() {
return randomParkAndMiller(&_seed);
}
template<class T, MEMFLAGS F>
typename GenericTaskQueueSet<T, F>::PopResult GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, E& t) {
template<class T, MemTag MT>
typename GenericTaskQueueSet<T, MT>::PopResult GenericTaskQueueSet<T, MT>::steal_best_of_2(uint queue_num, E& t) {
T* const local_queue = queue(queue_num);
if (_n > 2) {
uint k1 = queue_num;
@ -372,8 +372,8 @@ typename GenericTaskQueueSet<T, F>::PopResult GenericTaskQueueSet<T, F>::steal_b
}
}
template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
template<class T, MemTag MT>
bool GenericTaskQueueSet<T, MT>::steal(uint queue_num, E& t) {
uint const num_retries = 2 * _n;
TASKQUEUE_STATS_ONLY(uint contended_in_a_row = 0;)
@ -394,9 +394,9 @@ bool GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
return false;
}
template<class E, MEMFLAGS F, unsigned int N>
template<class E, MemTag MT, unsigned int N>
template<class Fn>
inline void GenericTaskQueue<E, F, N>::iterate(Fn fn) {
inline void GenericTaskQueue<E, MT, N>::iterate(Fn fn) {
uint iters = size();
uint index = bottom_relaxed();
for (uint i = 0; i < iters; ++i) {


@ -1,5 +1,6 @@
/*
* Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +58,7 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
// subsystem for mapping not-yet-written-to pages to a single physical backing page,
// but this is not guaranteed, and would confuse NMT and other memory accounting tools.
MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);
MemTracker::record_virtual_memory_tag(_map_space.base(), mtGC);
size_t page_size = os::vm_page_size();


@ -252,7 +252,7 @@ jint ShenandoahHeap::initialize() {
bitmap_size_orig, bitmap_page_size,
bitmap.base(),
bitmap.size(), bitmap.page_size());
MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
_bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
_bitmap_region_special = bitmap.special();
@ -276,7 +276,7 @@ jint ShenandoahHeap::initialize() {
os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
"Cannot commit verification bitmap memory");
}
MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
_verification_bit_map.initialize(_heap_region, verify_bitmap_region);
_verifier = new ShenandoahVerifier(this, &_verification_bit_map);
@ -290,7 +290,7 @@ jint ShenandoahHeap::initialize() {
bitmap_size_orig, aux_bitmap_page_size,
aux_bitmap.base(),
aux_bitmap.size(), aux_bitmap.page_size());
MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
MemTracker::record_virtual_memory_tag(aux_bitmap.base(), mtGC);
_aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
_aux_bitmap_region_special = aux_bitmap.special();
_aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
@ -308,7 +308,7 @@ jint ShenandoahHeap::initialize() {
region_storage_size_orig, region_page_size,
region_storage.base(),
region_storage.size(), region_storage.page_size());
MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
MemTracker::record_virtual_memory_tag(region_storage.base(), mtGC);
if (!region_storage.special()) {
os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
"Cannot commit region memory");


@ -1,5 +1,6 @@
/*
* Copyright (c) 2016, 2024, Red Hat, Inc. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +29,7 @@
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutex.hpp"
@ -36,11 +37,11 @@
class ShenandoahHeap;
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
template<class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, MT, N>
{
public:
typedef OverflowTaskQueue<E, F, N> taskqueue_t;
typedef OverflowTaskQueue<E, MT, N> taskqueue_t;
BufferedOverflowTaskQueue() : _buf_empty(true) {};
@ -301,8 +302,8 @@ public:
typedef BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> ShenandoahBufferedOverflowTaskQueue;
typedef Padded<ShenandoahBufferedOverflowTaskQueue> ShenandoahObjToScanQueue;
template <class T, MEMFLAGS F>
class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, F> {
template <class T, MemTag MT>
class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, MT> {
private:
shenandoah_padding(0);
volatile jint _claimed_index;
@ -311,10 +312,10 @@ private:
debug_only(uint _reserved; )
public:
using GenericTaskQueueSet<T, F>::size;
using GenericTaskQueueSet<T, MT>::size;
public:
ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, F>(n), _claimed_index(0) {
ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, MT>(n), _claimed_index(0) {
debug_only(_reserved = 0; )
}
@ -331,9 +332,9 @@ public:
debug_only(uint get_reserved() const { return (uint)_reserved; })
};
template <class T, MEMFLAGS F>
T* ParallelClaimableQueueSet<T, F>::claim_next() {
jint size = (jint)GenericTaskQueueSet<T, F>::size();
template <class T, MemTag MT>
T* ParallelClaimableQueueSet<T, MT>::claim_next() {
jint size = (jint)GenericTaskQueueSet<T, MT>::size();
if (_claimed_index >= size) {
return nullptr;
@ -342,7 +343,7 @@ T* ParallelClaimableQueueSet<T, F>::claim_next() {
jint index = Atomic::add(&_claimed_index, 1, memory_order_relaxed);
if (index <= size) {
return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
return GenericTaskQueueSet<T, MT>::queue((uint)index - 1);
} else {
return nullptr;
}


@ -1,5 +1,6 @@
/*
* Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,8 +31,8 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/stack.inline.hpp"
template <class E, MEMFLAGS F, unsigned int N>
bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t) {
template <class E, MemTag MT, unsigned int N>
bool BufferedOverflowTaskQueue<E, MT, N>::pop(E &t) {
if (!_buf_empty) {
t = _elem;
_buf_empty = true;
@ -45,8 +46,8 @@ bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t) {
return taskqueue_t::pop_overflow(t);
}
template <class E, MEMFLAGS F, unsigned int N>
inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t) {
template <class E, MemTag MT, unsigned int N>
inline bool BufferedOverflowTaskQueue<E, MT, N>::push(E t) {
if (_buf_empty) {
_elem = t;
_buf_empty = false;
@ -58,8 +59,8 @@ inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t) {
return true;
}
template <class E, MEMFLAGS F, unsigned int N>
void BufferedOverflowTaskQueue<E, F, N>::clear() {
template <class E, MemTag MT, unsigned int N>
void BufferedOverflowTaskQueue<E, MT, N>::clear() {
_buf_empty = true;
taskqueue_t::set_empty();
taskqueue_t::overflow_stack()->clear();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -182,7 +182,7 @@ bool XVirtualMemoryManager::reserve(size_t max_capacity) {
void XVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC);
MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap);
MemTracker::record_virtual_memory_tag((void*)start, mtJavaHeap);
}
bool XVirtualMemoryManager::is_initialized() const {


@ -26,7 +26,7 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zNMT.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "utilities/nativeCallStack.hpp"


@ -25,7 +25,7 @@
#ifndef SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP
#define SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/objectBitSet.inline.hpp"
typedef ObjectBitSet<mtTracing> JFRBitSet;


@ -735,7 +735,7 @@
</Event>
<Event name="NativeMemoryUsage" category="Java Virtual Machine, Memory" label="Native Memory Usage Per Type"
description="Native memory usage for a given memory type in the JVM" period="everyChunk">
description="Native memory usage for a given memory tag in the JVM" period="everyChunk">
<Field type="NMTType" name="type" label="Memory Type" description="Type used for the native memory allocation" />
<Field type="ulong" contentType="bytes" name="reserved" label="Reserved Memory" description="Reserved bytes for this type" />
<Field type="ulong" contentType="bytes" name="committed" label="Committed Memory" description="Committed bytes for this type" />


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,10 +63,10 @@ void JfrNativeMemoryEvent::send_total_event(const Ticks& timestamp) {
event.commit();
}
void JfrNativeMemoryEvent::send_type_event(const Ticks& starttime, MEMFLAGS flag, size_t reserved, size_t committed) {
void JfrNativeMemoryEvent::send_type_event(const Ticks& starttime, MemTag mem_tag, size_t reserved, size_t committed) {
EventNativeMemoryUsage event(UNTIMED);
event.set_starttime(starttime);
event.set_type(NMTUtil::flag_to_index(flag));
event.set_type(NMTUtil::tag_to_index(mem_tag));
event.set_reserved(reserved);
event.set_committed(committed);
event.commit();
@ -79,12 +79,12 @@ void JfrNativeMemoryEvent::send_type_events(const Ticks& timestamp) {
NMTUsage* usage = get_usage(timestamp);
for (int index = 0; index < mt_number_of_types; index ++) {
MEMFLAGS flag = NMTUtil::index_to_flag(index);
if (flag == mtNone) {
for (int index = 0; index < mt_number_of_tags; index ++) {
MemTag mem_tag = NMTUtil::index_to_tag(index);
if (mem_tag == mtNone) {
// Skip mtNone since it is not really used.
continue;
}
send_type_event(timestamp, flag, usage->reserved(flag), usage->committed(flag));
send_type_event(timestamp, mem_tag, usage->reserved(mem_tag), usage->committed(mem_tag));
}
}
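The renamed helpers used above (mt_number_of_tags, NMTUtil::index_to_tag, tag_to_index, tag_to_name) compose into the usual iteration pattern. A sketch, assuming NMTUtil still lives in nmt/nmtCommon.hpp (header path and function name are assumptions):

#include "nmt/nmtCommon.hpp"      // NMTUtil (assumed location)
#include "utilities/ostream.hpp"

// Illustrative: enumerate every tag except the mtNone placeholder.
static void list_mem_tags(outputStream* st) {
  for (int i = 0; i < mt_number_of_tags; i++) {
    MemTag tag = NMTUtil::index_to_tag(i);
    if (tag == mtNone) continue;   // skipped, as in send_type_events() above
    st->print_cr("%d: %s", NMTUtil::tag_to_index(tag), NMTUtil::tag_to_name(tag));
  }
}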


@ -25,7 +25,7 @@
#ifndef SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP
#define SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "nmt/nmtUsage.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
@ -35,7 +35,7 @@
// so no more synchronization is needed.
class JfrNativeMemoryEvent : public AllStatic {
private:
static void send_type_event(const Ticks& starttime, MEMFLAGS flag, size_t reserved, size_t committed);
static void send_type_event(const Ticks& starttime, MemTag mem_tag, size_t reserved, size_t committed);
public:
static void send_total_event(const Ticks& timestamp);
static void send_type_events(const Ticks& timestamp);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -332,10 +332,10 @@ void CompilerTypeConstant::serialize(JfrCheckpointWriter& writer) {
}
void NMTTypeConstant::serialize(JfrCheckpointWriter& writer) {
writer.write_count(mt_number_of_types);
for (int i = 0; i < mt_number_of_types; ++i) {
writer.write_count(mt_number_of_tags);
for (int i = 0; i < mt_number_of_tags; ++i) {
writer.write_key(i);
MEMFLAGS flag = NMTUtil::index_to_flag(i);
writer.write(NMTUtil::flag_to_name(flag));
MemTag mem_tag = NMTUtil::index_to_tag(i);
writer.write(NMTUtil::tag_to_name(mem_tag));
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -117,7 +117,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
_rs.base(),
_rs.size(),
os::vm_page_size());
MemTracker::record_virtual_memory_type((address)_rs.base(), mtTracing);
MemTracker::record_virtual_memory_tag((address)_rs.base(), mtTracing);
assert(is_aligned(_rs.base(), os::vm_page_size()), "invariant");
assert(is_aligned(_rs.size(), os::vm_page_size()), "invariant");


@ -36,10 +36,10 @@
// allocate using malloc; will fail if no memory available
char* AllocateHeap(size_t size,
MEMFLAGS flags,
MemTag mem_tag,
const NativeCallStack& stack,
AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
char* p = (char*) os::malloc(size, flags, stack);
char* p = (char*) os::malloc(size, mem_tag, stack);
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
}
@ -47,16 +47,16 @@ char* AllocateHeap(size_t size,
}
char* AllocateHeap(size_t size,
MEMFLAGS flags,
MemTag mem_tag,
AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
}
char* ReallocateHeap(char *old,
size_t size,
MEMFLAGS flag,
MemTag mem_tag,
AllocFailType alloc_failmode) {
char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
}
@ -119,16 +119,16 @@ void* AnyObj::operator new(size_t size, Arena *arena) {
return res;
}
void* AnyObj::operator new(size_t size, MEMFLAGS flags) throw() {
address res = (address)AllocateHeap(size, flags, CALLER_PC);
void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
return res;
}
void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
MEMFLAGS flags) throw() {
MemTag mem_tag) throw() {
// should only call this with std::nothrow, use other operator new() otherwise
address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res!= nullptr) set_allocation_type(res, C_HEAP);)
return res;
}
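Taken together with the allocation.hpp changes below, call sites keep their shape; only the tag parameter's type changes. A short sketch of tagged C-heap usage after the rename (class and function names are illustrative):

#include "memory/allocation.hpp"

// Illustrative C-heap object: the CHeapObj template parameter is now a MemTag value.
class ExampleEntry : public CHeapObj<mtInternal> {
 public:
  int _value;
};

// Illustrative helper: AllocateHeap/FreeHeap keep their signatures apart from the tag type.
static void example_c_heap_usage() {
  char* buf = AllocateHeap(64, mtInternal);   // tagged malloc, exits on OOM by default
  FreeHeap(buf);

  ExampleEntry* e = new ExampleEntry();       // routed through CHeapObj<mtInternal>::operator new
  delete e;
}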


@ -26,7 +26,7 @@
#define SHARE_MEMORY_ALLOCATION_HPP
#include "memory/allStatic.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -94,9 +94,9 @@ typedef AllocFailStrategy::AllocFailEnum AllocFailType;
// NEW_C_HEAP_OBJ*
// FREE_C_HEAP_OBJ
//
// char* AllocateHeap(size_t size, MEMFLAGS flags, const NativeCallStack& stack, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// char* AllocateHeap(size_t size, MEMFLAGS flags, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// char* AllocateHeap(size_t size, MemTag mem_tag, const NativeCallStack& stack, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// char* AllocateHeap(size_t size, MemTag mem_tag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// char* ReallocateHeap(char *old, size_t size, MemTag mem_tag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// void FreeHeap(void* p);
//
@ -106,16 +106,16 @@ class NativeCallStack;
char* AllocateHeap(size_t size,
MEMFLAGS flags,
MemTag mem_tag,
const NativeCallStack& stack,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
char* AllocateHeap(size_t size,
MEMFLAGS flags,
MemTag mem_tag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
char* ReallocateHeap(char *old,
size_t size,
MEMFLAGS flag,
MemTag mem_tag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// handles null pointers
@ -123,50 +123,50 @@ void FreeHeap(void* p);
class CHeapObjBase {
public:
ALWAYSINLINE void* operator new(size_t size, MEMFLAGS f) {
return AllocateHeap(size, f);
ALWAYSINLINE void* operator new(size_t size, MemTag mem_tag) {
return AllocateHeap(size, mem_tag);
}
ALWAYSINLINE void* operator new(size_t size,
MEMFLAGS f,
MemTag mem_tag,
const NativeCallStack& stack) {
return AllocateHeap(size, f, stack);
return AllocateHeap(size, mem_tag, stack);
}
ALWAYSINLINE void* operator new(size_t size,
MEMFLAGS f,
MemTag mem_tag,
const std::nothrow_t&,
const NativeCallStack& stack) throw() {
return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL);
return AllocateHeap(size, mem_tag, stack, AllocFailStrategy::RETURN_NULL);
}
ALWAYSINLINE void* operator new(size_t size,
MEMFLAGS f,
MemTag mem_tag,
const std::nothrow_t&) throw() {
return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL);
return AllocateHeap(size, mem_tag, AllocFailStrategy::RETURN_NULL);
}
ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f) {
return AllocateHeap(size, f);
ALWAYSINLINE void* operator new[](size_t size, MemTag mem_tag) {
return AllocateHeap(size, mem_tag);
}
ALWAYSINLINE void* operator new[](size_t size,
MEMFLAGS f,
MemTag mem_tag,
const NativeCallStack& stack) {
return AllocateHeap(size, f, stack);
return AllocateHeap(size, mem_tag, stack);
}
ALWAYSINLINE void* operator new[](size_t size,
MEMFLAGS f,
MemTag mem_tag,
const std::nothrow_t&,
const NativeCallStack& stack) throw() {
return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL);
return AllocateHeap(size, mem_tag, stack, AllocFailStrategy::RETURN_NULL);
}
ALWAYSINLINE void* operator new[](size_t size,
MEMFLAGS f,
MemTag mem_tag,
const std::nothrow_t&) throw() {
return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL);
return AllocateHeap(size, mem_tag, AllocFailStrategy::RETURN_NULL);
}
void operator delete(void* p) { FreeHeap(p); }
@ -174,43 +174,43 @@ class CHeapObjBase {
};
// Uses the implicitly static new and delete operators of CHeapObjBase
template<MEMFLAGS F>
template<MemTag MT>
class CHeapObj {
public:
ALWAYSINLINE void* operator new(size_t size) {
return CHeapObjBase::operator new(size, F);
return CHeapObjBase::operator new(size, MT);
}
ALWAYSINLINE void* operator new(size_t size,
const NativeCallStack& stack) {
return CHeapObjBase::operator new(size, F, stack);
return CHeapObjBase::operator new(size, MT, stack);
}
ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt,
const NativeCallStack& stack) throw() {
return CHeapObjBase::operator new(size, F, nt, stack);
return CHeapObjBase::operator new(size, MT, nt, stack);
}
ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt) throw() {
return CHeapObjBase::operator new(size, F, nt);
return CHeapObjBase::operator new(size, MT, nt);
}
ALWAYSINLINE void* operator new[](size_t size) {
return CHeapObjBase::operator new[](size, F);
return CHeapObjBase::operator new[](size, MT);
}
ALWAYSINLINE void* operator new[](size_t size,
const NativeCallStack& stack) {
return CHeapObjBase::operator new[](size, F, stack);
return CHeapObjBase::operator new[](size, MT, stack);
}
ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t& nt,
const NativeCallStack& stack) throw() {
return CHeapObjBase::operator new[](size, F, nt, stack);
return CHeapObjBase::operator new[](size, MT, nt, stack);
}
ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t& nt) throw() {
return CHeapObjBase::operator new[](size, F, nt);
return CHeapObjBase::operator new[](size, MT, nt);
}
void operator delete(void* p) {
@ -439,10 +439,10 @@ protected:
public:
// CHeap allocations
void* operator new(size_t size, MEMFLAGS flags) throw();
void* operator new [](size_t size, MEMFLAGS flags) throw() = delete;
void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() = delete;
void* operator new(size_t size, MemTag mem_tag) throw();
void* operator new [](size_t size, MemTag mem_tag) throw() = delete;
void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MemTag mem_tag) throw();
void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MemTag mem_tag) throw() = delete;
// Arena allocations
void* operator new(size_t size, Arena *arena);
@ -510,36 +510,36 @@ protected:
#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
(type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
#define NEW_C_HEAP_ARRAY3(type, size, mem_tag, pc, allocfail)\
(type*) AllocateHeap((size) * sizeof(type), mem_tag, pc, allocfail)
#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
(type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
#define NEW_C_HEAP_ARRAY2(type, size, mem_tag, pc)\
(type*) (AllocateHeap((size) * sizeof(type), mem_tag, pc))
#define NEW_C_HEAP_ARRAY(type, size, memflags)\
(type*) (AllocateHeap((size) * sizeof(type), memflags))
#define NEW_C_HEAP_ARRAY(type, size, mem_tag)\
(type*) (AllocateHeap((size) * sizeof(type), mem_tag))
#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, mem_tag, pc)\
NEW_C_HEAP_ARRAY3(type, (size), mem_tag, pc, AllocFailStrategy::RETURN_NULL)
#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
NEW_C_HEAP_ARRAY2(type, (size), memflags, AllocFailStrategy::RETURN_NULL)
#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, mem_tag)\
NEW_C_HEAP_ARRAY2(type, (size), mem_tag, AllocFailStrategy::RETURN_NULL)
#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
#define REALLOC_C_HEAP_ARRAY(type, old, size, mem_tag)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), mem_tag))
#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, mem_tag)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), mem_tag, AllocFailStrategy::RETURN_NULL))
#define FREE_C_HEAP_ARRAY(type, old) \
FreeHeap((char*)(old))
// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
NEW_C_HEAP_ARRAY(type, 1, memflags)
#define NEW_C_HEAP_OBJ(type, mem_tag)\
NEW_C_HEAP_ARRAY(type, 1, mem_tag)
#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)
#define NEW_C_HEAP_OBJ_RETURN_NULL(type, mem_tag)\
NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, mem_tag)
// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname)\
@ -568,8 +568,8 @@ class MmapArrayAllocator : public AllStatic {
static size_t size_for(size_t length);
public:
static E* allocate_or_null(size_t length, MEMFLAGS flags);
static E* allocate(size_t length, MEMFLAGS flags);
static E* allocate_or_null(size_t length, MemTag mem_tag);
static E* allocate(size_t length, MemTag mem_tag);
static void free(E* addr, size_t length);
};
@ -579,8 +579,8 @@ class MallocArrayAllocator : public AllStatic {
public:
static size_t size_for(size_t length);
static E* allocate(size_t length, MEMFLAGS flags);
static E* reallocate(E* addr, size_t new_length, MEMFLAGS flags);
static E* allocate(size_t length, MemTag mem_tag);
static E* reallocate(E* addr, size_t new_length, MemTag mem_tag);
static void free(E* addr);
};
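
For orientation, a minimal usage sketch of the renamed heap-allocation entry points (illustrative only, not part of the patch; SampleTable and sample_allocations are invented names, and mtNMT/mtTest are merely example tags): call sites keep their shape, only the parameter type is now MemTag.

// Hypothetical example, not taken from the patch.
class SampleTable : public CHeapObj<mtNMT> {          // instances accounted under mtNMT
  int _dummy;
};

void sample_allocations() {
  SampleTable* t = new SampleTable();                  // CHeapObj<MT>::operator new
  char* buf = NEW_C_HEAP_ARRAY(char, 4096, mtTest);    // array helper now takes a MemTag
  FREE_C_HEAP_ARRAY(char, buf);
  delete t;                                            // operator delete -> FreeHeap
}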

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,10 +55,10 @@ size_t MmapArrayAllocator<E>::size_for(size_t length) {
}
template <class E>
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MemTag mem_tag) {
size_t size = size_for(length);
char* addr = os::reserve_memory(size, !ExecMem, flags);
char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
if (addr == nullptr) {
return nullptr;
}
@ -72,10 +72,10 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
}
template <class E>
E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
size_t size = size_for(length);
char* addr = os::reserve_memory(size, !ExecMem, flags);
char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
if (addr == nullptr) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
@ -97,13 +97,13 @@ size_t MallocArrayAllocator<E>::size_for(size_t length) {
}
template <class E>
E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
return (E*)AllocateHeap(size_for(length), flags);
E* MallocArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
return (E*)AllocateHeap(size_for(length), mem_tag);
}
template <class E>
E* MallocArrayAllocator<E>::reallocate(E* addr, size_t new_length, MEMFLAGS flags) {
return (E*)ReallocateHeap((char*)addr, size_for(new_length), flags);
E* MallocArrayAllocator<E>::reallocate(E* addr, size_t new_length, MemTag mem_tag) {
return (E*)ReallocateHeap((char*)addr, size_for(new_length), mem_tag);
}
template <class E>

View File

@ -222,8 +222,8 @@ void Chunk::next_chop(Chunk* k) {
k->_next = nullptr;
}
Arena::Arena(MEMFLAGS flag, Tag tag, size_t init_size) :
_flags(flag), _tag(tag),
Arena::Arena(MemTag mem_tag, Tag tag, size_t init_size) :
_mem_tag(mem_tag), _tag(tag),
_size_in_bytes(0),
_first(nullptr), _chunk(nullptr),
_hwm(nullptr), _max(nullptr)
@ -233,13 +233,13 @@ Arena::Arena(MEMFLAGS flag, Tag tag, size_t init_size) :
_first = _chunk;
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
MemTracker::record_new_arena(flag);
MemTracker::record_new_arena(mem_tag);
set_size_in_bytes(init_size);
}
Arena::~Arena() {
destruct_contents();
MemTracker::record_arena_free(_flags);
MemTracker::record_arena_free(_mem_tag);
}
// Destroy this arena's contents and reset to empty
@ -259,8 +259,8 @@ void Arena::set_size_in_bytes(size_t size) {
if (_size_in_bytes != size) {
ssize_t delta = size - size_in_bytes();
_size_in_bytes = size;
MemTracker::record_arena_size_change(delta, _flags);
if (CompilationMemoryStatistic::enabled() && _flags == mtCompiler) {
MemTracker::record_arena_size_change(delta, _mem_tag);
if (CompilationMemoryStatistic::enabled() && _mem_tag == mtCompiler) {
Thread* const t = Thread::current();
if (t != nullptr && t->is_Compiler_thread()) {
CompilationMemoryStatistic::on_arena_change(delta, this);
@ -286,7 +286,7 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
// (Note: all chunk sizes have to be 64-bit aligned)
size_t len = MAX2(ARENA_ALIGN(x), (size_t) Chunk::size);
if (MemTracker::check_exceeds_limit(x, _flags)) {
if (MemTracker::check_exceeds_limit(x, _mem_tag)) {
return nullptr;
}

View File

@ -107,7 +107,7 @@ public:
static const char* tag_desc[static_cast<int>(Arena::Tag::tag_count)];
private:
const MEMFLAGS _flags; // Memory tracking flags
const MemTag _mem_tag; // Native Memory Tracking tag
const Tag _tag;
size_t _size_in_bytes; // Size of arena (used for native memory tracking)
@ -138,7 +138,7 @@ protected:
public:
// Start the chunk_pool cleaner task
static void start_chunk_pool_cleaner_task();
Arena(MEMFLAGS memflag, Tag tag = Tag::tag_other, size_t init_size = Chunk::init_size);
Arena(MemTag mem_tag, Tag tag = Tag::tag_other, size_t init_size = Chunk::init_size);
~Arena();
void destruct_contents();
char* hwm() const { return _hwm; }
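
A small illustrative sketch of arena construction after the rename (not part of the patch; sample_arena_use is an invented name and Arena::Amalloc is assumed from the existing Arena API): the first constructor argument is now a MemTag, while Arena::Tag continues to describe the arena kind.

// Hypothetical example, not taken from the patch.
void sample_arena_use() {
  Arena arena(mtCompiler);        // NMT records a new arena under mtCompiler
  void* p = arena.Amalloc(64);    // growth is reported via record_arena_size_change(_mem_tag)
  // The destructor runs destruct_contents() and record_arena_free(mtCompiler).
}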

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "memory/guardedMemory.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "runtime/os.hpp"
void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -232,7 +232,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
return false;
}
MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
MemTracker::record_virtual_memory_tag((address)_segmap.low_boundary(), mtCode);
assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,8 +102,8 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
return MemRegion();
}
MemRegion* MemRegion::create_array(size_t length, MEMFLAGS flags) {
MemRegion* result = NEW_C_HEAP_ARRAY(MemRegion, length, flags);
MemRegion* MemRegion::create_array(size_t length, MemTag mem_tag) {
MemRegion* result = NEW_C_HEAP_ARRAY(MemRegion, length, mem_tag);
for (size_t i = 0; i < length; i++) {
::new (&result[i]) MemRegion();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,7 +90,7 @@ public:
bool is_empty() const { return word_size() == 0; }
// Creates and initializes an array of MemRegions of the given length.
static MemRegion* create_array(size_t length, MEMFLAGS flags);
static MemRegion* create_array(size_t length, MemTag mem_tag);
static void destroy_array(MemRegion* array, size_t length);
};
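
A brief illustrative sketch of the renamed factory (not part of the patch; sample_mem_regions is an invented name and mtGC is just an example tag):

// Hypothetical example, not taken from the patch.
static void sample_mem_regions() {
  MemRegion* regions = MemRegion::create_array(16, mtGC);  // heap array tagged mtGC
  // ... populate and use regions[0] .. regions[15] ...
  MemRegion::destroy_array(regions, 16);
}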

View File

@ -772,7 +772,7 @@ void Metaspace::global_initialize() {
}
// Mark class space as such
MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
MemTracker::record_virtual_memory_tag((address)rs.base(), mtClass);
// Initialize space
Metaspace::initialize_class_space(rs);

View File

@ -259,7 +259,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
if (!rs.is_reserved()) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
}
MemTracker::record_virtual_memory_type(rs.base(), mtMetaspace);
MemTracker::record_virtual_memory_tag(rs.base(), mtMetaspace);
assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
InternalStats::inc_num_vsnodes_births();
return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);

View File

@ -25,7 +25,7 @@
#ifndef SHARE_MEMORY_PADDED_HPP
#define SHARE_MEMORY_PADDED_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@ -89,7 +89,7 @@ class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
// Helper class to create an array of PaddedEnd<T> objects. All elements will
// start at a multiple of alignment and the size will be aligned to alignment.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_PADDING_SIZE>
template <class T, MemTag MT, size_t alignment = DEFAULT_PADDING_SIZE>
class PaddedArray {
public:
// Creates an aligned padded array.
@ -100,7 +100,7 @@ class PaddedArray {
// Helper class to create an array of references to arrays of primitive types
// Both the array of references and the data arrays are aligned to the given
// alignment. The allocated memory is zero-filled.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_PADDING_SIZE>
template <class T, MemTag MT, size_t alignment = DEFAULT_PADDING_SIZE>
class Padded2DArray {
public:
// Creates an aligned padded 2D array.
@ -112,7 +112,7 @@ class Padded2DArray {
// Helper class to create an array of T objects. The array as a whole will
// start at a multiple of alignment and its size will be aligned to alignment.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_PADDING_SIZE>
template <class T, MemTag MT, size_t alignment = DEFAULT_PADDING_SIZE>
class PaddedPrimitiveArray {
public:
static T* create_unfreeable(size_t length);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,13 +34,13 @@
// Creates an aligned padded array.
// The memory can't be deleted since the raw memory chunk is not returned.
template <class T, MEMFLAGS flags, size_t alignment>
PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
template <class T, MemTag MT, size_t alignment>
PaddedEnd<T>* PaddedArray<T, MT, alignment>::create_unfreeable(uint length) {
// Check that the PaddedEnd class works as intended.
STATIC_ASSERT(is_aligned(sizeof(PaddedEnd<T>), alignment));
// Allocate a chunk of memory large enough to allow for some alignment.
void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, MT);
// Make the initial alignment.
PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_up(chunk, alignment);
@ -53,8 +53,8 @@ PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
return aligned_padded_array;
}
template <class T, MEMFLAGS flags, size_t alignment>
T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
template <class T, MemTag MT, size_t alignment>
T** Padded2DArray<T, MT, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
// Calculate and align the size of the first dimension's table.
size_t table_size = align_up(rows * sizeof(T*), alignment);
// The size of the separate rows.
@ -63,7 +63,7 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
size_t total_size = table_size + rows * row_size + alignment;
// Allocate a chunk of memory large enough to allow alignment of the chunk.
void* chunk = MmapArrayAllocator<uint8_t>::allocate(total_size, flags);
void* chunk = MmapArrayAllocator<uint8_t>::allocate(total_size, MT);
// Clear the allocated memory.
// Align the chunk of memory.
T** result = (T**)align_up(chunk, alignment);
@ -81,16 +81,16 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
return result;
}
template <class T, MEMFLAGS flags, size_t alignment>
T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
template <class T, MemTag MT, size_t alignment>
T* PaddedPrimitiveArray<T, MT, alignment>::create_unfreeable(size_t length) {
void* temp;
return create(length, &temp);
}
template <class T, MEMFLAGS flags, size_t alignment>
T* PaddedPrimitiveArray<T, flags, alignment>::create(size_t length, void** alloc_base) {
template <class T, MemTag MT, size_t alignment>
T* PaddedPrimitiveArray<T, MT, alignment>::create(size_t length, void** alloc_base) {
// Allocate a chunk of memory large enough to allow for some alignment.
void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
void* chunk = AllocateHeap(length * sizeof(T) + alignment, MT);
memset(chunk, 0, length * sizeof(T) + alignment);

View File

@ -51,11 +51,11 @@ class ResourceArea: public Arena {
#endif // ASSERT
public:
ResourceArea(MEMFLAGS flags = mtThread) :
Arena(flags, Arena::Tag::tag_ra) DEBUG_ONLY(COMMA _nesting(0)) {}
ResourceArea(MemTag mem_tag = mtThread) :
Arena(mem_tag, Arena::Tag::tag_ra) DEBUG_ONLY(COMMA _nesting(0)) {}
ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) :
Arena(flags, Arena::Tag::tag_ra, init_size) DEBUG_ONLY(COMMA _nesting(0)) {
ResourceArea(size_t init_size, MemTag mem_tag = mtThread) :
Arena(mem_tag, Arena::Tag::tag_ra, init_size) DEBUG_ONLY(COMMA _nesting(0)) {
}
char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

View File

@ -653,7 +653,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
"area must be distinguishable from marks for mark-sweep");
if (base() != nullptr) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
}
if (_fd_for_heap != -1) {
@ -671,7 +671,7 @@ ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
size_t rs_page_size) : ReservedSpace() {
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
MemTracker::record_virtual_memory_type((address)base(), mtCode);
MemTracker::record_virtual_memory_tag((address)base(), mtCode);
}
// VirtualSpace

View File

@ -25,7 +25,7 @@
#ifndef SHARE_NMT_ALLOCATIONSITE_HPP
#define SHARE_NMT_ALLOCATIONSITE_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/nativeCallStack.hpp"
// Allocation site represents a code path that makes a memory
@ -33,9 +33,9 @@
class AllocationSite {
private:
const NativeCallStack _call_stack;
const MEMFLAGS _flag;
const MemTag _mem_tag;
public:
AllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : _call_stack(stack), _flag(flag) { }
AllocationSite(const NativeCallStack& stack, MemTag mem_tag) : _call_stack(stack), _mem_tag(mem_tag) { }
bool equals(const NativeCallStack& stack) const {
return _call_stack.equals(stack);
@ -49,7 +49,7 @@ class AllocationSite {
return &_call_stack;
}
MEMFLAGS flag() const { return _flag; }
MemTag mem_tag() const { return _mem_tag; }
};
#endif // SHARE_NMT_ALLOCATIONSITE_HPP
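
A tiny illustrative sketch of the renamed accessor (not part of the patch; site_tag_example is an invented name and mtNMT is just an example tag):

// Hypothetical example, not taken from the patch.
static MemTag site_tag_example(const NativeCallStack& stack) {
  AllocationSite site(stack, mtNMT);   // a call stack plus the tag it was allocated under
  return site.mem_tag();
}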

View File

@ -31,7 +31,7 @@
// A flat array of elements E, backed by C-heap, growing on-demand. It allows for
// returning arbitrary elements and keeps them in a freelist. Elements can be uniquely
// identified via array index.
template<typename E, MEMFLAGS flag>
template<typename E, MemTag MT>
class ArrayWithFreeList {
// An E must be trivially copyable and destructible, but it may be constructed
@ -52,7 +52,7 @@ private:
E e;
};
GrowableArrayCHeap<BackingElement, flag> _backing_storage;
GrowableArrayCHeap<BackingElement, MT> _backing_storage;
I _free_start;
bool is_in_bounds(I i) {

View File

@ -26,16 +26,16 @@
#include "nmt/mallocHeader.inline.hpp"
#include "nmt/mallocSiteTable.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"
// The malloc header, as well as the coming VMATree implementation, rely on MEMFLAGS
// The malloc header, as well as the coming VMATree implementation, rely on MemTag
// fitting into eight bits.
STATIC_ASSERT(sizeof(MEMFLAGS) == sizeof(uint8_t));
STATIC_ASSERT(sizeof(MemTag) == sizeof(uint8_t));
void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const {
assert(bad_address >= (address)this, "sanity");

View File

@ -26,7 +26,7 @@
#ifndef SHARE_NMT_MALLOCHEADER_HPP
#define SHARE_NMT_MALLOCHEADER_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/nativeCallStack.hpp"
@ -92,7 +92,7 @@ class MallocHeader {
NOT_LP64(uint32_t _alt_canary);
const size_t _size;
const uint32_t _mst_marker;
const MEMFLAGS _flags;
const MemTag _mem_tag;
const uint8_t _unused;
uint16_t _canary;
@ -121,19 +121,19 @@ public:
// Contains all of the necessary data to deaccount a block with NMT.
struct FreeInfo {
const size_t size;
const MEMFLAGS flags;
const MemTag mem_tag;
const uint32_t mst_marker;
};
inline MallocHeader(size_t size, MEMFLAGS flags, uint32_t mst_marker);
inline MallocHeader(size_t size, MemTag mem_tag, uint32_t mst_marker);
inline size_t size() const { return _size; }
inline MEMFLAGS flags() const { return _flags; }
inline size_t size() const { return _size; }
inline MemTag mem_tag() const { return _mem_tag; }
inline uint32_t mst_marker() const { return _mst_marker; }
// Return the necessary data to deaccount the block with NMT.
FreeInfo free_info() {
return FreeInfo{this->size(), this->flags(), this->mst_marker()};
return FreeInfo{this->size(), this->mem_tag(), this->mst_marker()};
}
inline void mark_block_as_dead();
inline void revive();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -34,8 +34,8 @@
#include "utilities/macros.hpp"
#include "utilities/nativeCallStack.hpp"
inline MallocHeader::MallocHeader(size_t size, MEMFLAGS flags, uint32_t mst_marker)
: _size(size), _mst_marker(mst_marker), _flags(flags),
inline MallocHeader::MallocHeader(size_t size, MemTag mem_tag, uint32_t mst_marker)
: _size(size), _mst_marker(mst_marker), _mem_tag(mem_tag),
_unused(0), _canary(_header_canary_live_mark)
{
assert(size < max_reasonable_malloc_size, "Too large allocation size?");
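
As a reading aid (not part of the patch; tag_of_block is an invented name): the header directly precedes the user payload, so the tag recorded at allocation time can be read back through resolve_checked, which is used the same way in MallocTracker::record_malloc further below.

// Hypothetical example, not taken from the patch.
static MemTag tag_of_block(void* payload) {
  const MallocHeader* hdr = MallocHeader::resolve_checked(payload);  // header sits right before the payload
  return hdr->mem_tag();                                             // the eight-bit tag recorded at allocation
}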

View File

@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "nmt/mallocLimit.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/java.hpp"
#include "runtime/globals.hpp"
@ -80,7 +80,7 @@ public:
// Check if string at position matches a category name.
// Advances position on match.
bool match_category(MEMFLAGS* out) {
bool match_category(MemTag* out) {
if (eof()) {
return false;
}
@ -90,9 +90,9 @@ public:
}
stringStream ss;
ss.print("%.*s", (int)(end - _p), _p);
MEMFLAGS f = NMTUtil::string_to_flag(ss.base());
if (f != mtNone) {
*out = f;
MemTag mem_tag = NMTUtil::string_to_mem_tag(ss.base());
if (mem_tag != mtNone) {
*out = mem_tag;
_p = end;
return true;
}
@ -131,16 +131,16 @@ void MallocLimitSet::set_global_limit(size_t s, MallocLimitMode flag) {
_glob.sz = s; _glob.mode = flag;
}
void MallocLimitSet::set_category_limit(MEMFLAGS f, size_t s, MallocLimitMode flag) {
const int i = NMTUtil::flag_to_index(f);
void MallocLimitSet::set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode flag) {
const int i = NMTUtil::tag_to_index(mem_tag);
_cat[i].sz = s; _cat[i].mode = flag;
}
void MallocLimitSet::reset() {
set_global_limit(0, MallocLimitMode::trigger_fatal);
_glob.sz = 0; _glob.mode = MallocLimitMode::trigger_fatal;
for (int i = 0; i < mt_number_of_types; i++) {
set_category_limit(NMTUtil::index_to_flag(i), 0, MallocLimitMode::trigger_fatal);
for (int i = 0; i < mt_number_of_tags; i++) {
set_category_limit(NMTUtil::index_to_tag(i), 0, MallocLimitMode::trigger_fatal);
}
}
@ -150,10 +150,10 @@ void MallocLimitSet::print_on(outputStream* st) const {
st->print_cr("MallocLimit: total limit: " PROPERFMT " (%s)", PROPERFMTARGS(_glob.sz),
mode_to_name(_glob.mode));
} else {
for (int i = 0; i < mt_number_of_types; i++) {
for (int i = 0; i < mt_number_of_tags; i++) {
if (_cat[i].sz > 0) {
st->print_cr("MallocLimit: category \"%s\" limit: " PROPERFMT " (%s)",
NMTUtil::flag_to_enum_name(NMTUtil::index_to_flag(i)),
NMTUtil::tag_to_enum_name(NMTUtil::index_to_tag(i)),
PROPERFMTARGS(_cat[i].sz), mode_to_name(_cat[i].mode));
}
}
@ -187,13 +187,13 @@ bool MallocLimitSet::parse_malloclimit_option(const char* v, const char** err) {
// Category-specific form?
else {
while (!sst.eof()) {
MEMFLAGS f;
MemTag mem_tag;
// Match category, followed by :
BAIL_UNLESS(sst.match_category(&f), "Expected category name");
BAIL_UNLESS(sst.match_category(&mem_tag), "Expected category name");
BAIL_UNLESS(sst.match_char(':'), "Expected colon following category");
malloclimit* const modified_limit = &_cat[NMTUtil::flag_to_index(f)];
malloclimit* const modified_limit = &_cat[NMTUtil::tag_to_index(mem_tag)];
// Match size
BAIL_UNLESS(sst.match_size(&modified_limit->sz), "Expected size");

View File

@ -27,7 +27,7 @@
#define SHARE_SERVICES_MALLOCLIMIT_HPP
#include "memory/allStatic.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@ -46,18 +46,18 @@ class outputStream;
class MallocLimitSet {
malloclimit _glob; // global limit
malloclimit _cat[mt_number_of_types]; // per-category limit
malloclimit _cat[mt_number_of_tags]; // per-category limit
public:
MallocLimitSet();
void reset();
bool parse_malloclimit_option(const char* optionstring, const char** err);
void set_global_limit(size_t s, MallocLimitMode flag);
void set_category_limit(MEMFLAGS f, size_t s, MallocLimitMode flag);
void set_global_limit(size_t s, MallocLimitMode type);
void set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode mode);
const malloclimit* global_limit() const { return &_glob; }
const malloclimit* category_limit(MEMFLAGS f) const { return &_cat[(int)f]; }
const malloclimit* category_limit(MemTag mem_tag) const { return &_cat[(int)mem_tag]; }
void print_on(outputStream* st) const;
};
@ -69,7 +69,7 @@ class MallocLimitHandler : public AllStatic {
public:
static const malloclimit* global_limit() { return _limits.global_limit(); }
static const malloclimit* category_limit(MEMFLAGS f) { return _limits.category_limit(f); }
static const malloclimit* category_limit(MemTag mem_tag) { return _limits.category_limit(mem_tag); }
static void initialize(const char* options);
static void print_on(outputStream* st);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,15 +111,15 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) {
* 2. Overflow hash bucket.
* Under any of above circumstances, caller should handle the situation.
*/
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags) {
assert(flags != mtNone, "Should have a real memory type");
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MemTag mem_tag) {
assert(mem_tag != mtNone, "Should have a real memory tag");
const unsigned int hash = key.calculate_hash();
const unsigned int index = hash_to_index(hash);
*marker = 0;
// First entry for this hash bucket
if (_table[index] == nullptr) {
MallocSiteHashtableEntry* entry = new_entry(key, flags);
MallocSiteHashtableEntry* entry = new_entry(key, mem_tag);
// OOM check
if (entry == nullptr) return nullptr;
@ -137,14 +137,14 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t*
while (head != nullptr && pos_idx < MAX_BUCKET_LENGTH) {
if (head->hash() == hash) {
MallocSite* site = head->data();
if (site->flag() == flags && site->equals(key)) {
if (site->mem_tag() == mem_tag && site->equals(key)) {
*marker = build_marker(index, pos_idx);
return head->data();
}
}
if (head->next() == nullptr && pos_idx < (MAX_BUCKET_LENGTH - 1)) {
MallocSiteHashtableEntry* entry = new_entry(key, flags);
MallocSiteHashtableEntry* entry = new_entry(key, mem_tag);
// OOM check
if (entry == nullptr) return nullptr;
if (head->atomic_insert(entry)) {
@ -177,10 +177,10 @@ MallocSite* MallocSiteTable::malloc_site(uint32_t marker) {
// Allocates MallocSiteHashtableEntry object. Special call stack
// (pre-installed allocation site) has to be used to avoid infinite
// recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MemTag mem_tag) {
void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
*hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
return ::new (p) MallocSiteHashtableEntry(key, flags);
return ::new (p) MallocSiteHashtableEntry(key, mem_tag);
}
bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {

View File

@ -38,8 +38,8 @@
class MallocSite : public AllocationSite {
MemoryCounter _c;
public:
MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
AllocationSite(stack, flags) {}
MallocSite(const NativeCallStack& stack, MemTag mem_tag) :
AllocationSite(stack, mem_tag) {}
void allocate(size_t size) { _c.allocate(size); }
void deallocate(size_t size) { _c.deallocate(size); }
@ -63,9 +63,9 @@ class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
public:
MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
_malloc_site(stack, flags), _hash(stack.calculate_hash()), _next(nullptr) {
assert(flags != mtNone, "Expect a real memory type");
MallocSiteHashtableEntry(NativeCallStack stack, MemTag mem_tag):
_malloc_site(stack, mem_tag), _hash(stack.calculate_hash()), _next(nullptr) {
assert(mem_tag != mtNone, "Expect a real memory tag");
}
inline const MallocSiteHashtableEntry* next() const {
@ -147,8 +147,8 @@ class MallocSiteTable : AllStatic {
// 1. out of memory
// 2. overflow hash bucket
static inline bool allocation_at(const NativeCallStack& stack, size_t size,
uint32_t* marker, MEMFLAGS flags) {
MallocSite* site = lookup_or_add(stack, marker, flags);
uint32_t* marker, MemTag mem_tag) {
MallocSite* site = lookup_or_add(stack, marker, mem_tag);
if (site != nullptr) site->allocate(size);
return site != nullptr;
}
@ -170,9 +170,9 @@ class MallocSiteTable : AllStatic {
static void print_tuning_statistics(outputStream* st);
private:
static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MemTag mem_tag);
static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags);
static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MemTag mem_tag);
static MallocSite* malloc_site(uint32_t marker);
static bool walk(MallocSiteWalker* walker);

View File

@ -69,7 +69,7 @@ void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) {
s->_all_mallocs = _all_mallocs;
size_t total_size = 0;
size_t total_count = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
for (int index = 0; index < mt_number_of_tags; index ++) {
s->_malloc[index] = _malloc[index];
total_size += s->_malloc[index].malloc_size();
total_count += s->_malloc[index].malloc_count();
@ -81,7 +81,7 @@ void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) {
// Total malloc'd memory used by arenas
size_t MallocMemorySnapshot::total_arena() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
for (int index = 0; index < mt_number_of_tags; index ++) {
amount += _malloc[index].arena_size();
}
return amount;
@ -91,7 +91,7 @@ size_t MallocMemorySnapshot::total_arena() const {
// from total chunks to get total free chunk size
void MallocMemorySnapshot::make_adjustment() {
size_t arena_size = total_arena();
int chunk_idx = NMTUtil::flag_to_index(mtChunk);
int chunk_idx = NMTUtil::tag_to_index(mtChunk);
_malloc[chunk_idx].record_free(arena_size);
_all_mallocs.deallocate(arena_size);
}
@ -128,11 +128,11 @@ bool MallocMemorySummary::total_limit_reached(size_t s, size_t so_far, const mal
return true;
}
bool MallocMemorySummary::category_limit_reached(MEMFLAGS f, size_t s, size_t so_far, const malloclimit* limit) {
bool MallocMemorySummary::category_limit_reached(MemTag mem_tag, size_t s, size_t so_far, const malloclimit* limit) {
#define FORMATTED \
"MallocLimit: reached category \"%s\" limit (triggering allocation size: " PROPERFMT ", allocated so far: " PROPERFMT ", limit: " PROPERFMT ") ", \
NMTUtil::flag_to_enum_name(f), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz)
NMTUtil::tag_to_enum_name(mem_tag), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz)
// If we hit the limit during error reporting, we print a short warning but otherwise ignore it.
// We don't want to risk recursive assertion or torn hs-err logs.
@ -167,20 +167,20 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
}
// Record a malloc memory allocation
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MemTag mem_tag,
const NativeCallStack& stack)
{
assert(MemTracker::enabled(), "precondition");
assert(malloc_base != nullptr, "precondition");
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_malloc(size, mem_tag);
uint32_t mst_marker = 0;
if (MemTracker::tracking_level() == NMT_detail) {
MallocSiteTable::allocation_at(stack, size, &mst_marker, flags);
MallocSiteTable::allocation_at(stack, size, &mst_marker, mem_tag);
}
// Uses placement global new operator to initialize malloc header
MallocHeader* const header = ::new (malloc_base)MallocHeader(size, flags, mst_marker);
MallocHeader* const header = ::new (malloc_base)MallocHeader(size, mem_tag, mst_marker);
void* const memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
// The alignment check: 8 bytes alignment for 32 bit systems.
@ -192,7 +192,7 @@ void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flag
{
const MallocHeader* header2 = MallocHeader::resolve_checked(memblock);
assert(header2->size() == size, "Wrong size");
assert(header2->flags() == flags, "Wrong flags");
assert(header2->mem_tag() == mem_tag, "Wrong memory tag");
}
#endif
@ -213,7 +213,7 @@ void* MallocTracker::record_free_block(void* memblock) {
}
void MallocTracker::deaccount(MallocHeader::FreeInfo free_info) {
MallocMemorySummary::record_free(free_info.size, free_info.flags);
MallocMemorySummary::record_free(free_info.size, free_info.mem_tag);
if (MemTracker::tracking_level() == NMT_detail) {
MallocSiteTable::deallocation_at(free_info.size, free_info.mst_marker);
}
@ -296,7 +296,7 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) {
p2i(p), where,
(block->is_dead() ? "dead" : "live"),
p2i(block + 1), // lets print the payload start, not the header
block->size(), NMTUtil::flag_to_enum_name(block->flags()));
block->size(), NMTUtil::tag_to_enum_name(block->mem_tag()));
if (MemTracker::tracking_level() == NMT_detail) {
NativeCallStack ncs;
if (MallocSiteTable::access_stack(ncs, *block)) {

View File

@ -27,7 +27,7 @@
#define SHARE_NMT_MALLOCTRACKER_HPP
#include "nmt/mallocHeader.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
@ -150,18 +150,18 @@ class MallocMemorySnapshot {
friend class MallocMemorySummary;
private:
MallocMemory _malloc[mt_number_of_types];
MallocMemory _malloc[mt_number_of_tags];
MemoryCounter _all_mallocs;
public:
inline MallocMemory* by_type(MEMFLAGS flags) {
int index = NMTUtil::flag_to_index(flags);
inline MallocMemory* by_type(MemTag mem_tag) {
int index = NMTUtil::tag_to_index(mem_tag);
return &_malloc[index];
}
inline const MallocMemory* by_type(MEMFLAGS flags) const {
int index = NMTUtil::flag_to_index(flags);
inline const MallocMemory* by_type(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return &_malloc[index];
}
@ -214,31 +214,31 @@ class MallocMemorySummary : AllStatic {
// Called when a total limit break was detected.
// Will return true if the limit was handled, false if it was ignored.
static bool category_limit_reached(MEMFLAGS f, size_t s, size_t so_far, const malloclimit* limit);
static bool category_limit_reached(MemTag mem_tag, size_t s, size_t so_far, const malloclimit* limit);
public:
static void initialize();
static inline void record_malloc(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_malloc(size);
static inline void record_malloc(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_malloc(size);
as_snapshot()->_all_mallocs.allocate(size);
}
static inline void record_free(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_free(size);
static inline void record_free(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_free(size);
as_snapshot()->_all_mallocs.deallocate(size);
}
static inline void record_new_arena(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_new_arena();
static inline void record_new_arena(MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_new_arena();
}
static inline void record_arena_free(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_free();
static inline void record_arena_free(MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_arena_free();
}
static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_size_change(size);
static inline void record_arena_size_change(ssize_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_arena_size_change(size);
}
static void snapshot(MallocMemorySnapshot* s) {
@ -257,7 +257,7 @@ class MallocMemorySummary : AllStatic {
// MallocLimit: returns true if allocating s bytes on f would trigger
// either global or the category limit
static inline bool check_exceeds_limit(size_t s, MEMFLAGS f);
static inline bool check_exceeds_limit(size_t s, MemTag mem_tag);
};
@ -280,7 +280,7 @@ class MallocTracker : AllStatic {
//
// Record malloc on specified memory block
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
static void* record_malloc(void* malloc_base, size_t size, MemTag mem_tag,
const NativeCallStack& stack);
// Given a block returned by os::malloc() or os::realloc():
@ -289,21 +289,21 @@ class MallocTracker : AllStatic {
// Given the free info from a block, de-account block from NMT.
static void deaccount(MallocHeader::FreeInfo free_info);
static inline void record_new_arena(MEMFLAGS flags) {
MallocMemorySummary::record_new_arena(flags);
static inline void record_new_arena(MemTag mem_tag) {
MallocMemorySummary::record_new_arena(mem_tag);
}
static inline void record_arena_free(MEMFLAGS flags) {
MallocMemorySummary::record_arena_free(flags);
static inline void record_arena_free(MemTag mem_tag) {
MallocMemorySummary::record_arena_free(mem_tag);
}
static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
MallocMemorySummary::record_arena_size_change(size, flags);
static inline void record_arena_size_change(ssize_t size, MemTag mem_tag) {
MallocMemorySummary::record_arena_size_change(size, mem_tag);
}
// MallocLimit: Given an allocation size s, check if mallocing this much
// under category f would hit either the global limit or the limit for category f.
static inline bool check_exceeds_limit(size_t s, MEMFLAGS f);
// for MemTag would hit either the global limit or the limit for MemTag.
static inline bool check_exceeds_limit(size_t s, MemTag mem_tag);
// Given a pointer, look for the containing malloc block.
// Print the block. Note that since there is very low risk of memory looking

View File

@ -32,7 +32,7 @@
#include "utilities/globalDefinitions.hpp"
// Returns true if allocating s bytes on f would trigger either global or the category limit
inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) {
inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MemTag mem_tag) {
// Note: checks are ordered to have as little impact as possible on the standard code path,
// when MallocLimit is unset, resp. it is set but we have reached no limit yet.
@ -50,12 +50,12 @@ inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) {
}
} else {
// Category Limit?
l = MallocLimitHandler::category_limit(f);
l = MallocLimitHandler::category_limit(mem_tag);
if (l->sz > 0) {
const MallocMemory* mm = as_snapshot()->by_type(f);
const MallocMemory* mm = as_snapshot()->by_type(mem_tag);
size_t so_far = mm->malloc_size() + mm->arena_size();
if ((so_far + s) > l->sz) {
return category_limit_reached(f, s, so_far, l);
return category_limit_reached(mem_tag, s, so_far, l);
}
}
}
@ -64,8 +64,8 @@ inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) {
return false;
}
inline bool MallocTracker::check_exceeds_limit(size_t s, MEMFLAGS f) {
return MallocMemorySummary::check_exceeds_limit(s, f);
inline bool MallocTracker::check_exceeds_limit(size_t s, MemTag mem_tag) {
return MallocMemorySummary::check_exceeds_limit(s, mem_tag);
}

View File

@ -61,11 +61,11 @@ int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
return s1.call_stack()->compare(*s2.call_stack());
}
// Sort into allocation site addresses and memory type order for baseline comparison
// Sort into allocation site addresses and memory tag order for baseline comparison
int compare_malloc_site_and_type(const MallocSite& s1, const MallocSite& s2) {
int res = compare_malloc_site(s1, s2);
if (res == 0) {
res = (int)(NMTUtil::flag_to_index(s1.flag()) - NMTUtil::flag_to_index(s2.flag()));
res = (int)(NMTUtil::tag_to_index(s1.mem_tag()) - NMTUtil::tag_to_index(s2.mem_tag()));
}
return res;
@ -207,7 +207,7 @@ bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
const ReservedMemoryRegion* rgn;
VirtualMemoryAllocationSite* site;
while ((rgn = itr.next()) != nullptr) {
VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->flag());
VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
site = allocation_sites.find(tmp);
if (site == nullptr) {
LinkedListNode<VirtualMemoryAllocationSite>* node =

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ class MemBaseline {
by_address, // by memory address
by_size, // by memory size
by_site, // by call site where the memory is allocated from
by_site_and_type // by call site and memory type
by_site_and_type // by call site and memory tag
};
private:
@ -144,14 +144,14 @@ class MemBaseline {
return bl->_malloc_memory_snapshot.malloc_overhead();
}
MallocMemory* malloc_memory(MEMFLAGS flag) {
MallocMemory* malloc_memory(MemTag mem_tag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot.by_type(flag);
return _malloc_memory_snapshot.by_type(mem_tag);
}
VirtualMemory* virtual_memory(MEMFLAGS flag) {
VirtualMemory* virtual_memory(MemTag mem_tag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _virtual_memory_snapshot.by_type(flag);
return _virtual_memory_snapshot.by_type(mem_tag);
}
@ -203,7 +203,7 @@ class MemBaseline {
void malloc_sites_to_size_order();
// Sort allocation sites in call site address order
void malloc_sites_to_allocation_site_order();
// Sort allocation sites in call site address and memory type order
// Sort allocation sites in call site address and memory tag order
void malloc_sites_to_allocation_site_and_type_order();
// Sort allocation sites in reserved size order

View File

@ -32,8 +32,8 @@
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memFlagBitmap.hpp"
#include "nmt/memTag.hpp"
#include "nmt/memTagBitmap.hpp"
#include "nmt/memMapPrinter.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
@ -50,9 +50,9 @@
/// NMT mechanics
// Short, clear, descriptive names for all possible markers. Note that we only expect to see
// those that have been used with mmap. Flags left out are printed with their nmt flag name.
// those that have been used with mmap. Tags left out are printed with their nmt tag name.
#define NMT_FLAGS_DO(f) \
/* flag, short, description */ \
/* mem_tag, short, description */ \
f(mtGCCardSet, "CARDTBL", "GC Card table") \
f(mtClassShared, "CDS", "CDS archives") \
f(mtClass, "CLASS", "Class Space") \
@ -67,11 +67,11 @@
f(mtTest, "TEST", "JVM internal test mappings")
//end
static const char* get_shortname_for_nmt_flag(MEMFLAGS f) {
#define DO(flag, shortname, text) if (flag == f) return shortname;
static const char* get_shortname_for_mem_tag(MemTag mem_tag) {
#define DO(t, shortname, text) if (t == mem_tag) return shortname;
NMT_FLAGS_DO(DO)
#undef DO
return NMTUtil::flag_to_enum_name(f);
return NMTUtil::tag_to_enum_name(mem_tag);
}
/// NMT virtual memory
@ -80,7 +80,7 @@ static bool range_intersects(const void* from1, const void* to1, const void* fro
return MAX2(from1, from2) < MIN2(to1, to2);
}
// A Cache that correlates range with MEMFLAG, optimized to be iterated quickly
// A Cache that correlates range with MemTag, optimized to be iterated quickly
// (cache friendly).
class CachedNMTInformation : public VirtualMemoryWalker {
struct Range { const void* from; const void* to; };
@ -88,24 +88,24 @@ class CachedNMTInformation : public VirtualMemoryWalker {
// structure would have, and it allows for faster iteration of ranges since more
// of them fit into a cache line.
Range* _ranges;
MEMFLAGS* _flags;
MemTag* _mem_tags;
size_t _count, _capacity;
mutable size_t _last;
public:
CachedNMTInformation() : _ranges(nullptr), _flags(nullptr),
CachedNMTInformation() : _ranges(nullptr), _mem_tags(nullptr),
_count(0), _capacity(0), _last(0) {}
~CachedNMTInformation() {
ALLOW_C_FUNCTION(free, ::free(_ranges);)
ALLOW_C_FUNCTION(free, ::free(_flags);)
ALLOW_C_FUNCTION(free, ::free(_mem_tags);)
}
bool add(const void* from, const void* to, MEMFLAGS f) {
bool add(const void* from, const void* to, MemTag mem_tag) {
// We rely on NMT regions being sorted by base
assert(_count == 0 || (from >= _ranges[_count - 1].to), "NMT regions unordered?");
// we can just fold two regions if they are adjacent and have the same flag.
if (_count > 0 && from == _ranges[_count - 1].to && f == _flags[_count - 1]) {
// we can just fold two regions if they are adjacent and have the same mem_tag.
if (_count > 0 && from == _ranges[_count - 1].to && mem_tag == _mem_tags[_count - 1]) {
_ranges[_count - 1].to = to;
return true;
}
@ -114,8 +114,8 @@ public:
const size_t new_capacity = MAX2((size_t)4096, 2 * _capacity);
// Unfortunately, we need to allocate manually, raw, since we must prevent NMT deadlocks (ThreadCritical).
ALLOW_C_FUNCTION(realloc, _ranges = (Range*)::realloc(_ranges, new_capacity * sizeof(Range));)
ALLOW_C_FUNCTION(realloc, _flags = (MEMFLAGS*)::realloc(_flags, new_capacity * sizeof(MEMFLAGS));)
if (_ranges == nullptr || _flags == nullptr) {
ALLOW_C_FUNCTION(realloc, _mem_tags = (MemTag*)::realloc(_mem_tags, new_capacity * sizeof(MemTag));)
if (_ranges == nullptr || _mem_tags == nullptr) {
// In case of OOM lets make no fuss. Just return.
return false;
}
@ -123,14 +123,14 @@ public:
}
assert(_capacity > _count, "Sanity");
_ranges[_count] = Range { from, to };
_flags[_count] = f;
_mem_tags[_count] = mem_tag;
_count++;
return true;
}
// Given a vma [from, to), find all regions that intersect with this vma and
// return their collective flags.
MemFlagBitmap lookup(const void* from, const void* to) const {
MemTagBitmap lookup(const void* from, const void* to) const {
assert(from <= to, "Sanity");
// We optimize for sequential lookups. Since this class is used when a list
// of OS mappings is scanned (VirtualQuery, /proc/pid/maps), and these lists
@ -139,10 +139,10 @@ public:
// the range is to the right of the given section, we need to re-start the search
_last = 0;
}
MemFlagBitmap bm;
MemTagBitmap bm;
for(uintx i = _last; i < _count; i++) {
if (range_intersects(from, to, _ranges[i].from, _ranges[i].to)) {
bm.set_flag(_flags[i]);
bm.set_tag(_mem_tags[i]);
} else if (to <= _ranges[i].from) {
_last = i;
break;
@ -153,7 +153,7 @@ public:
bool do_allocation_site(const ReservedMemoryRegion* rgn) override {
// Cancel iteration if we run out of memory (add returns false);
return add(rgn->base(), rgn->end(), rgn->flag());
return add(rgn->base(), rgn->end(), rgn->mem_tag());
}
// Iterate all NMT virtual memory regions and fill this cache.
@ -247,16 +247,16 @@ bool MappingPrintSession::print_nmt_info_for_region(const void* vma_from, const
// print NMT information, if available
if (MemTracker::enabled()) {
// Correlate vma region (from, to) with NMT region(s) we collected previously.
const MemFlagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
const MemTagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
if (flags.has_any()) {
for (int i = 0; i < mt_number_of_types; i++) {
const MEMFLAGS flag = (MEMFLAGS)i;
if (flags.has_flag(flag)) {
for (int i = 0; i < mt_number_of_tags; i++) {
const MemTag mem_tag = (MemTag)i;
if (flags.has_tag(mem_tag)) {
if (num_printed > 0) {
_out->put(',');
}
_out->print("%s", get_shortname_for_nmt_flag(flag));
if (flag == mtThreadStack) {
_out->print("%s", get_shortname_for_mem_tag(mem_tag));
if (mem_tag == mtThreadStack) {
print_thread_details_for_supposed_stack_address(vma_from, vma_to, _out);
}
num_printed++;

View File

@ -27,7 +27,7 @@
#define SHARE_SERVICES_MEMMAPPRINTER_HPP
#include "memory/allStatic.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef LINUX

View File

@ -26,7 +26,7 @@
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "nmt/memReporter.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/threadStackTracker.hpp"
@ -70,17 +70,17 @@ void MemReporterBase::print_total(size_t reserved, size_t committed, size_t peak
}
}
void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const {
void MemReporterBase::print_malloc(const MemoryCounter* c, MemTag mem_tag) const {
const char* scale = current_scale();
outputStream* out = output();
const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";
const char* alloc_type = (mem_tag == mtThreadStack) ? "" : "malloc=";
const size_t amount = c->size();
const size_t count = c->count();
if (flag != mtNone) {
if (mem_tag != mtNone) {
out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
amount_in_current_scale(amount), scale, NMTUtil::tag_to_name(mem_tag));
} else {
out->print("(%s" SIZE_FORMAT "%s", alloc_type,
amount_in_current_scale(amount), scale);
@ -176,31 +176,31 @@ void MemSummaryReporter::report() {
out->cr();
out->cr();
// Summary by memory type
for (int index = 0; index < mt_number_of_types; index ++) {
MEMFLAGS flag = NMTUtil::index_to_flag(index);
// Summary by memory tag
for (int index = 0; index < mt_number_of_tags; index ++) {
MemTag mem_tag = NMTUtil::index_to_tag(index);
// thread stack is reported as part of thread category
if (flag == mtThreadStack) continue;
MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
if (mem_tag == mtThreadStack) continue;
MallocMemory* malloc_memory = _malloc_snapshot->by_type(mem_tag);
VirtualMemory* virtual_memory = _vm_snapshot->by_type(mem_tag);
report_summary_of_type(flag, malloc_memory, virtual_memory);
report_summary_of_type(mem_tag, malloc_memory, virtual_memory);
}
}
void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
void MemSummaryReporter::report_summary_of_type(MemTag mem_tag,
MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {
size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
size_t committed_amount = committed_total(malloc_memory, virtual_memory);
// Count thread's native stack in "Thread" category
if (flag == mtThread) {
if (mem_tag == mtThread) {
const VirtualMemory* thread_stack_usage =
(const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
reserved_amount += thread_stack_usage->reserved();
committed_amount += thread_stack_usage->committed();
} else if (flag == mtNMT) {
} else if (mem_tag == mtNMT) {
// Count malloc headers in "NMT" category
reserved_amount += _malloc_snapshot->malloc_overhead();
committed_amount += _malloc_snapshot->malloc_overhead();
@ -219,10 +219,10 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
outputStream* out = output();
const char* scale = current_scale();
constexpr int indent = 28;
out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag));
print_total(reserved_amount, committed_amount);
#if INCLUDE_CDS
if (flag == mtClassShared) {
if (mem_tag == mtClassShared) {
size_t read_only_bytes = FileMapInfo::readonly_total();
output()->print(", readonly=" SIZE_FORMAT "%s",
amount_in_current_scale(read_only_bytes), scale);
@ -232,12 +232,12 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
streamIndentor si(out, indent);
if (flag == mtClass) {
if (mem_tag == mtClass) {
// report class count
out->print_cr("(classes #" SIZE_FORMAT ")", (_instance_class_count + _array_class_count));
out->print_cr("( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
_instance_class_count, _array_class_count);
} else if (flag == mtThread) {
} else if (mem_tag == mtThread) {
const VirtualMemory* thread_stack_usage =
_vm_snapshot->by_type(mtThreadStack);
// report thread count
@ -263,11 +263,11 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
out->cr();
}
if (flag == mtNMT &&
if (mem_tag == mtNMT &&
amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
out->print_cr("(tracking overhead=" SIZE_FORMAT "%s)",
amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
} else if (flag == mtClass) {
} else if (mem_tag == mtClass) {
// Metadata information
report_metadata(Metaspace::NonClassType);
if (Metaspace::using_class_space()) {
@ -338,12 +338,12 @@ int MemDetailReporter::report_malloc_sites() {
}
const NativeCallStack* stack = malloc_site->call_stack();
_stackprinter.print_stack(stack);
MEMFLAGS flag = malloc_site->flag();
assert(NMTUtil::flag_is_valid(flag) && flag != mtNone,
"Must have a valid memory type");
MemTag mem_tag = malloc_site->mem_tag();
assert(NMTUtil::tag_is_valid(mem_tag) && mem_tag != mtNone,
"Must have a valid memory tag");
INDENT_BY(29,
out->print("(");
print_malloc(malloc_site->counter(), flag);
print_malloc(malloc_site->counter(), mem_tag);
out->print_cr(")");
)
out->cr();
@ -378,9 +378,9 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
INDENT_BY(29,
out->print("(");
print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
const MEMFLAGS flag = virtual_memory_site->flag();
if (flag != mtNone) {
out->print(" Type=%s", NMTUtil::flag_to_name(flag));
const MemTag mem_tag = virtual_memory_site->mem_tag();
if (mem_tag != mtNone) {
out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag));
}
out->print_cr(")");
)
@ -423,7 +423,7 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion*
const char* region_type = (all_committed ? "reserved and committed" : "reserved");
out->cr();
print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
out->print(" for %s", NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
if (stack->is_empty()) {
out->cr();
} else {
@ -519,31 +519,31 @@ void MemSummaryDiffReporter::report_diff() {
out->cr();
out->cr();
// Summary diff by memory type
for (int index = 0; index < mt_number_of_types; index ++) {
MEMFLAGS flag = NMTUtil::index_to_flag(index);
// Summary diff by memory tag
for (int index = 0; index < mt_number_of_tags; index ++) {
MemTag mem_tag = NMTUtil::index_to_tag(index);
// thread stack is reported as part of thread category
if (flag == mtThreadStack) continue;
diff_summary_of_type(flag,
_early_baseline.malloc_memory(flag),
_early_baseline.virtual_memory(flag),
if (mem_tag == mtThreadStack) continue;
diff_summary_of_type(mem_tag,
_early_baseline.malloc_memory(mem_tag),
_early_baseline.virtual_memory(mem_tag),
_early_baseline.metaspace_stats(),
_current_baseline.malloc_memory(flag),
_current_baseline.virtual_memory(flag),
_current_baseline.malloc_memory(mem_tag),
_current_baseline.virtual_memory(mem_tag),
_current_baseline.metaspace_stats());
}
}
void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
size_t early_amount, size_t early_count, MEMFLAGS flags) const {
size_t early_amount, size_t early_count, MemTag mem_tag) const {
const char* scale = current_scale();
outputStream* out = output();
const char* alloc_type = (flags == mtThread) ? "" : "malloc=";
const char* alloc_tag = (mem_tag == mtThread) ? "" : "malloc=";
out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
out->print("%s" SIZE_FORMAT "%s", alloc_tag, amount_in_current_scale(current_amount), scale);
// Report type only if it is valid and not under "thread" category
if (flags != mtNone && flags != mtThread) {
out->print(" type=%s", NMTUtil::flag_to_name(flags));
if (mem_tag != mtNone && mem_tag != mtThread) {
out->print(" type=%s", NMTUtil::tag_to_name(mem_tag));
}
int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
@ -594,7 +594,7 @@ void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved,
}
void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
void MemSummaryDiffReporter::diff_summary_of_type(MemTag mem_tag,
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
const MetaspaceCombinedStats& early_ms,
const MallocMemory* current_malloc, const VirtualMemory* current_vm,
@ -613,7 +613,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
size_t early_committed_amount = committed_total(early_malloc, early_vm);
// Adjust virtual memory total
if (flag == mtThread) {
if (mem_tag == mtThread) {
const VirtualMemory* early_thread_stack_usage =
_early_baseline.virtual_memory(mtThreadStack);
const VirtualMemory* current_thread_stack_usage =
@ -624,7 +624,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
current_reserved_amount += current_thread_stack_usage->reserved();
current_committed_amount += current_thread_stack_usage->committed();
} else if (flag == mtNMT) {
} else if (mem_tag == mtNMT) {
early_reserved_amount += _early_baseline.malloc_tracking_overhead();
early_committed_amount += _early_baseline.malloc_tracking_overhead();
@ -636,7 +636,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
// print summary line
out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag));
print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
early_reserved_amount, early_committed_amount);
out->print_cr(")");
@ -644,7 +644,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
streamIndentor si(out, indent);
// detail lines
if (flag == mtClass) {
if (mem_tag == mtClass) {
// report class count
out->print("(classes #" SIZE_FORMAT, _current_baseline.class_count());
const ssize_t class_count_diff =
@ -668,7 +668,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
}
out->print_cr(")");
} else if (flag == mtThread) {
} else if (mem_tag == mtThread) {
// report thread count
out->print("(threads #" SIZE_FORMAT, _current_baseline.thread_count());
const ssize_t thread_count_diff = counter_diff(_current_baseline.thread_count(), _early_baseline.thread_count());
@ -696,7 +696,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
if (amount_in_current_scale(current_malloc_amount) > 0 ||
diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
out->print("(");
print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
print_malloc_diff(current_malloc_amount, (mem_tag == mtChunk) ? 0 : current_malloc->malloc_count(),
early_malloc_amount, early_malloc->malloc_count(), mtNone);
out->print_cr(")");
}
@ -720,7 +720,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
}
// Report native memory tracking overhead
if (flag == mtNMT) {
if (mem_tag == mtNMT) {
size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead());
@ -733,7 +733,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
out->print(" " INT64_PLUS_FORMAT "%s", overhead_diff, scale);
}
out->print_cr(")");
} else if (flag == mtClass) {
} else if (mem_tag == mtClass) {
print_metaspace_diff(current_ms, early_ms);
}
out->cr();
@ -847,9 +847,9 @@ void MemDetailDiffReporter::diff_virtual_memory_sites() const {
} else if (compVal > 0) {
old_virtual_memory_site(early_site);
early_site = early_itr.next();
} else if (early_site->flag() != current_site->flag()) {
// This site was originally allocated with one flag, then released,
// then re-allocated at the same site (as far as we can tell) with a different flag.
} else if (early_site->mem_tag() != current_site->mem_tag()) {
// This site was originally allocated with one memory tag, then released,
// then re-allocated at the same site (as far as we can tell) with a different memory tag.
old_virtual_memory_site(early_site);
early_site = early_itr.next();
new_virtual_memory_site(current_site);
@ -866,29 +866,29 @@ void MemDetailDiffReporter::diff_virtual_memory_sites() const {
void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
0, 0, malloc_site->flag());
0, 0, malloc_site->mem_tag());
}
void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
malloc_site->count(), malloc_site->flag());
malloc_site->count(), malloc_site->mem_tag());
}
void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
const MallocSite* current) const {
if (early->flag() != current->flag()) {
if (early->mem_tag() != current->mem_tag()) {
// If malloc site type changed, treat it as deallocation of old type and
// allocation of new type.
old_malloc_site(early);
new_malloc_site(current);
} else {
diff_malloc_site(current->call_stack(), current->size(), current->count(),
early->size(), early->count(), early->flag());
early->size(), early->count(), early->mem_tag());
}
}
void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
size_t current_count, size_t early_size, size_t early_count, MemTag mem_tag) const {
outputStream* out = output();
assert(stack != nullptr, "null stack");
@ -900,7 +900,7 @@ void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_
_stackprinter.print_stack(stack);
INDENT_BY(28,
out->print("(");
print_malloc_diff(current_size, current_count, early_size, early_count, flags);
print_malloc_diff(current_size, current_count, early_size, early_count, mem_tag);
out->print_cr(")");
)
out->cr();
@ -909,21 +909,21 @@ void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_
void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag());
diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->mem_tag());
}
void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag());
diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->mem_tag());
}
void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
const VirtualMemoryAllocationSite* current) const {
diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
early->reserved(), early->committed(), current->flag());
early->reserved(), early->committed(), current->mem_tag());
}
void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const {
size_t current_committed, size_t early_reserved, size_t early_committed, MemTag mem_tag) const {
outputStream* out = output();
// no change
@ -936,8 +936,8 @@ void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stac
INDENT_BY(28,
out->print("(mmap: ");
print_virtual_memory_diff(current_reserved, current_committed, early_reserved, early_committed);
if (flag != mtNone) {
out->print(" Type=%s", NMTUtil::flag_to_name(flag));
if (mem_tag != mtNone) {
out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag));
}
out->print_cr(")");
)
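
Throughout this reporter, changed values are printed as the current amount followed by an explicitly signed delta against the baseline. A tiny self-contained sketch of that convention (invented print_with_diff helper; scale and unit handling omitted):

#include <cstdio>
#include <cstdint>
#include <cstddef>

// Print "label=current" plus a signed delta, skipping the delta when nothing changed.
static void print_with_diff(const char* label, size_t current, size_t early) {
  int64_t diff = (int64_t)current - (int64_t)early;
  printf("%s=%zu", label, current);
  if (diff != 0) {
    printf(" %+lld", (long long)diff);   // explicit sign, as in the diff report
  }
  printf("\n");
}

int main() {
  print_with_diff("malloc",   2048, 1024);   // malloc=2048 +1024
  print_with_diff("reserved", 512,  512);    // reserved=512
  return 0;
}
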


@ -108,7 +108,7 @@ class MemReporterBase : public StackObj {
// Print summary total, malloc and virtual memory
void print_total(size_t reserved, size_t committed, size_t peak = 0) const;
void print_malloc(const MemoryCounter* c, MEMFLAGS flag = mtNone) const;
void print_malloc(const MemoryCounter* c, MemTag mem_tag = mtNone) const;
void print_virtual_memory(size_t reserved, size_t committed, size_t peak) const;
void print_arena(const MemoryCounter* c) const;
@ -138,8 +138,8 @@ class MemSummaryReporter : public MemReporterBase {
// Generate summary report
virtual void report();
private:
// Report summary for each memory type
void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
// Report summary for each memory tag
void report_summary_of_type(MemTag mem_tag, MallocMemory* malloc_memory,
VirtualMemory* virtual_memory);
void report_metadata(Metaspace::MetadataType type) const;
@ -203,8 +203,8 @@ class MemSummaryDiffReporter : public MemReporterBase {
virtual void report_diff();
private:
// report the comparison of each memory type
void diff_summary_of_type(MEMFLAGS type,
// report the comparison of each mem_tag
void diff_summary_of_type(MemTag mem_tag,
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
const MetaspaceCombinedStats& early_ms,
const MallocMemory* current_malloc, const VirtualMemory* current_vm,
@ -212,7 +212,7 @@ class MemSummaryDiffReporter : public MemReporterBase {
protected:
void print_malloc_diff(size_t current_amount, size_t current_count,
size_t early_amount, size_t early_count, MEMFLAGS flags) const;
size_t early_amount, size_t early_count, MemTag mem_tag) const;
void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
size_t early_reserved, size_t early_committed) const;
void print_arena_diff(size_t current_amount, size_t current_count,
@ -262,9 +262,9 @@ class MemDetailDiffReporter : public MemSummaryDiffReporter {
const VirtualMemoryAllocationSite* current) const;
void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
size_t currrent_count, size_t early_size, size_t early_count, MEMFLAGS flags) const;
size_t currrent_count, size_t early_size, size_t early_count, MemTag mem_tag) const;
void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const;
size_t current_committed, size_t early_reserved, size_t early_committed, MemTag mem_tag) const;
};
#endif // SHARE_NMT_MEMREPORTER_HPP


@ -22,13 +22,13 @@
*
*/
#ifndef SHARE_NMT_MEMFLAGS_HPP
#define SHARE_NMT_MEMFLAGS_HPP
#ifndef SHARE_NMT_MEM_TAG_HPP
#define SHARE_NMT_MEM_TAG_HPP
#include "utilities/globalDefinitions.hpp"
#define MEMORY_TYPES_DO(f) \
/* Memory type by sub systems. It occupies lower byte. */ \
#define MEMORY_TAG_DO(f) \
/* Memory tag by sub systems. It occupies lower byte. */ \
f(mtJavaHeap, "Java Heap") /* Java heap */ \
f(mtClass, "Class") /* Java classes */ \
f(mtThread, "Thread") /* thread objects */ \
@ -61,22 +61,22 @@
f(mtNone, "Unknown") \
//end
#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \
type,
#define MEMORY_TAG_DECLARE_ENUM(mem_tag, human_readable) \
mem_tag,
enum class MEMFLAGS : uint8_t {
MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM)
mt_number_of_types // number of memory types (mtDontTrack
// is not included as validate type)
enum class MemTag : uint8_t {
MEMORY_TAG_DO(MEMORY_TAG_DECLARE_ENUM)
mt_number_of_tags // number of memory tags (mtDontTrack
// is not included as validate tag)
};
#define MEMORY_TYPE_SHORTNAME(type, human_readable) \
constexpr MEMFLAGS type = MEMFLAGS::type;
#define MEMORY_TAG_SHORTNAME(mem_tag, human_readable) \
constexpr MemTag mem_tag = MemTag::mem_tag;
// Generate short aliases for the enum values. E.g. mtGC instead of MEMFLAGS::mtGC.
MEMORY_TYPES_DO(MEMORY_TYPE_SHORTNAME)
// Generate short aliases for the enum values. E.g. mtGC instead of MemTag::mtGC.
MEMORY_TAG_DO(MEMORY_TAG_SHORTNAME)
// Make an int version of the sentinel end value.
constexpr int mt_number_of_types = static_cast<int>(MEMFLAGS::mt_number_of_types);
constexpr int mt_number_of_tags = static_cast<int>(MemTag::mt_number_of_tags);
#endif // SHARE_NMT_MEMFLAGS_HPP
#endif // SHARE_NMT_MEM_TAG_HPP
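
The X-macro above drives both the enum body and the short aliases. A minimal self-contained sketch of that expansion, with the tag list trimmed to three entries purely for illustration:

#include <cstdint>
#include <cstdio>

// Trimmed-down tag list; the real MEMORY_TAG_DO covers every NMT category.
#define DEMO_TAG_DO(f)         \
  f(mtJavaHeap, "Java Heap")   \
  f(mtClass,    "Class")       \
  f(mtNone,     "Unknown")

#define DEMO_DECLARE_ENUM(mem_tag, human_readable) mem_tag,
enum class MemTag : uint8_t {
  DEMO_TAG_DO(DEMO_DECLARE_ENUM)
  mt_number_of_tags                  // sentinel: number of tags above
};

#define DEMO_SHORTNAME(mem_tag, human_readable) \
  constexpr MemTag mem_tag = MemTag::mem_tag;
DEMO_TAG_DO(DEMO_SHORTNAME)          // lets callers write mtClass instead of MemTag::mtClass

constexpr int mt_number_of_tags = static_cast<int>(MemTag::mt_number_of_tags);

int main() {
  printf("number of tags: %d, mtClass -> index %d\n",
         mt_number_of_tags, static_cast<int>(mtClass));
  return 0;
}
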


@ -23,34 +23,34 @@
*
*/
#ifndef SHARE_NMT_MEMFLAGBITMAP_HPP
#define SHARE_NMT_MEMFLAGBITMAP_HPP
#ifndef SHARE_NMT_MEMTAGBITMAP_HPP
#define SHARE_NMT_MEMTAGBITMAP_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
class MemFlagBitmap {
class MemTagBitmap {
uint32_t _v;
STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_types);
STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_tags);
public:
MemFlagBitmap(uint32_t v = 0) : _v(v) {}
MemFlagBitmap(const MemFlagBitmap& o) : _v(o._v) {}
MemTagBitmap(uint32_t v = 0) : _v(v) {}
MemTagBitmap(const MemTagBitmap& o) : _v(o._v) {}
uint32_t raw_value() const { return _v; }
void set_flag(MEMFLAGS f) {
const int bitno = (int)f;
void set_tag(MemTag mem_tag) {
const int bitno = (int)mem_tag;
_v |= nth_bit(bitno);
}
bool has_flag(MEMFLAGS f) const {
const int bitno = (int)f;
bool has_tag(MemTag mem_tag) const {
const int bitno = (int)mem_tag;
return _v & nth_bit(bitno);
}
bool has_any() const { return _v > 0; }
};
#endif // SHARE_NMT_NMTUSAGE_HPP
#endif // SHARE_NMT_MEMTAGBITMAP_HPP
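
For illustration, a self-contained approximation of how the renamed bitmap is used; the MemTag list is trimmed, and nth_bit(n) is written out as 1 << n:

#include <cstdint>
#include <cassert>

enum class MemTag : uint8_t { mtJavaHeap, mtClass, mtThread, mtNone, mt_number_of_tags };

class MemTagBitmap {
  uint32_t _v;                        // one bit per MemTag value
public:
  MemTagBitmap(uint32_t v = 0) : _v(v) {}
  uint32_t raw_value() const { return _v; }
  void set_tag(MemTag t)       { _v |= 1u << static_cast<int>(t); }
  bool has_tag(MemTag t) const { return (_v & (1u << static_cast<int>(t))) != 0; }
  bool has_any() const         { return _v > 0; }
};

int main() {
  MemTagBitmap categories;
  categories.set_tag(MemTag::mtClass);      // mark categories of interest
  categories.set_tag(MemTag::mtThread);
  assert(categories.has_tag(MemTag::mtClass));
  assert(!categories.has_tag(MemTag::mtJavaHeap));
  assert(categories.has_any());
  return 0;
}
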


@ -63,7 +63,7 @@ void MemTracker::initialize() {
// Memory type is encoded into tracking header as a byte field,
// make sure that we don't overflow it.
STATIC_ASSERT(mt_number_of_types <= max_jubyte);
STATIC_ASSERT(mt_number_of_tags <= max_jubyte);
if (level > NMT_off) {
if (!MallocTracker::initialize(level) ||


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,11 +75,11 @@ class MemTracker : AllStatic {
return enabled() ? MallocTracker::overhead_per_malloc : 0;
}
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
static inline void* record_malloc(void* mem_base, size_t size, MemTag mem_tag,
const NativeCallStack& stack) {
assert(mem_base != nullptr, "caller should handle null");
if (enabled()) {
return MallocTracker::record_malloc(mem_base, size, flag, stack);
return MallocTracker::record_malloc(mem_base, size, mem_tag, stack);
}
return mem_base;
}
@ -99,34 +99,34 @@ class MemTracker : AllStatic {
}
// Record creation of an arena
static inline void record_new_arena(MEMFLAGS flag) {
static inline void record_new_arena(MemTag mem_tag) {
if (!enabled()) return;
MallocTracker::record_new_arena(flag);
MallocTracker::record_new_arena(mem_tag);
}
// Record destruction of an arena
static inline void record_arena_free(MEMFLAGS flag) {
static inline void record_arena_free(MemTag mem_tag) {
if (!enabled()) return;
MallocTracker::record_arena_free(flag);
MallocTracker::record_arena_free(mem_tag);
}
// Record arena size change. Arena size is the size of all arena
// chunks that are backing up the arena.
static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
static inline void record_arena_size_change(ssize_t diff, MemTag mem_tag) {
if (!enabled()) return;
MallocTracker::record_arena_size_change(diff, flag);
MallocTracker::record_arena_size_change(diff, mem_tag);
}
// Note: virtual memory operations should only ever be called after NMT initialization
// (we do not do any reservations before that).
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) {
MemTag mem_tag = mtNone) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
}
}
@ -147,12 +147,12 @@ class MemTracker : AllStatic {
}
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
const NativeCallStack& stack, MemTag mem_tag = mtNone) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}
@ -183,12 +183,12 @@ class MemTracker : AllStatic {
}
static inline void allocate_memory_in(MemoryFileTracker::MemoryFile* file, size_t offset, size_t size,
const NativeCallStack& stack, MEMFLAGS flag) {
const NativeCallStack& stack, MemTag mem_tag) {
assert_post_init();
if (!enabled()) return;
assert(file != nullptr, "must be");
MemoryFileTracker::Instance::Locker lock;
MemoryFileTracker::Instance::allocate_memory(file, offset, size, stack, flag);
MemoryFileTracker::Instance::allocate_memory(file, offset, size, stack, mem_tag);
}
static inline void free_memory_in(MemoryFileTracker::MemoryFile* file,
@ -206,21 +206,21 @@ class MemTracker : AllStatic {
//
// The two new memory regions will be both registered under stack and
// memory flags of the original region.
static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {
static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::split_reserved_region((address)addr, size, split, flag, split_flag);
VirtualMemoryTracker::split_reserved_region((address)addr, size, split, mem_tag, split_tag);
}
}
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
static inline void record_virtual_memory_tag(void* addr, MemTag mem_tag) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
VirtualMemoryTracker::set_reserved_region_type((address)addr, mem_tag);
}
}
@ -262,8 +262,8 @@ class MemTracker : AllStatic {
static void tuning_statistics(outputStream* out);
// MallocLimt: Given an allocation size s, check if mallocing this much
// under category f would hit either the global limit or the limit for category f.
static inline bool check_exceeds_limit(size_t s, MEMFLAGS f);
// for MemTag would hit either the global limit or the limit for MemTag.
static inline bool check_exceeds_limit(size_t s, MemTag mem_tag);
// Given an unknown pointer, check if it points into a known region; print region if found
// and return true; false if not found.
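
The split semantics of record_virtual_memory_split_reserved reduce to plain address arithmetic: the first split bytes stay under mem_tag and the remainder is re-registered under split_tag. A self-contained toy sketch, with an invented Region struct and string tags standing in for MemTag:

#include <cstdint>
#include <cstdio>
#include <cstddef>

// Toy model only: NMT tracks this through VirtualMemoryTracker; here a Region
// struct and string tags stand in so the address arithmetic is visible.
struct Region { uintptr_t base; size_t size; const char* tag; };

static void split_region(const Region& whole, size_t split, const char* split_tag,
                         Region* lo, Region* hi) {
  *lo = { whole.base,         split,              whole.tag };  // keeps the original tag
  *hi = { whole.base + split, whole.size - split, split_tag };  // gets the new tag
}

int main() {
  Region whole = { 0x100000, 0x8000, "mtClassShared" };
  Region lo, hi;
  split_region(whole, 0x6000, "mtClass", &lo, &hi);
  printf("[0x%lx, 0x%lx) %s\n", (unsigned long)lo.base, (unsigned long)(lo.base + lo.size), lo.tag);
  printf("[0x%lx, 0x%lx) %s\n", (unsigned long)hi.base, (unsigned long)(hi.base + hi.size), hi.tag);
  return 0;
}
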


@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -30,11 +30,11 @@
#include "nmt/mallocTracker.inline.hpp"
inline bool MemTracker::check_exceeds_limit(size_t s, MEMFLAGS f) {
inline bool MemTracker::check_exceeds_limit(size_t s, MemTag mem_tag) {
if (!enabled()) {
return false;
}
return MallocTracker::check_exceeds_limit(s, f);
return MallocTracker::check_exceeds_limit(s, mem_tag);
}
#endif // SHARE_NMT_MEMTRACKER_INLINE_HPP


@ -42,23 +42,23 @@ MemoryFileTracker::MemoryFileTracker(bool is_detailed_mode)
void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
size_t size, const NativeCallStack& stack,
MEMFLAGS flag) {
MemTag mem_tag) {
NativeCallStackStorage::StackIndex sidx = _stack_storage.push(stack);
VMATree::RegionData regiondata(sidx, flag);
VMATree::RegionData regiondata(sidx, mem_tag);
VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata);
for (int i = 0; i < mt_number_of_types; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
summary->reserve_memory(diff.flag[i].commit);
summary->commit_memory(diff.flag[i].commit);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
summary->commit_memory(diff.tag[i].commit);
}
}
void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) {
VMATree::SummaryDiff diff = file->_tree.release_mapping(offset, size);
for (int i = 0; i < mt_number_of_types; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
summary->reserve_memory(diff.flag[i].commit);
summary->commit_memory(diff.flag[i].commit);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
summary->commit_memory(diff.tag[i].commit);
}
}
@ -79,7 +79,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
return;
}
#ifdef ASSERT
if (broken_start != nullptr && prev->val().out.type() != current->val().in.type()) {
if (broken_start != nullptr && prev->val().out.mem_tag() != current->val().in.mem_tag()) {
broken_start = prev;
broken_end = current;
}
@ -91,7 +91,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
start_addr, end_addr,
NMTUtil::amount_in_scale(end_addr - start_addr, scale),
NMTUtil::scale_name(scale),
NMTUtil::flag_to_name(prev->val().out.flag()));
NMTUtil::tag_to_name(prev->val().out.mem_tag()));
{
streamIndentor si(stream, 4);
_stack_storage.get(prev->val().out.stack()).print_on(stream);
@ -138,8 +138,8 @@ bool MemoryFileTracker::Instance::initialize(NMT_TrackingLevel tracking_level) {
void MemoryFileTracker::Instance::allocate_memory(MemoryFile* file, size_t offset,
size_t size, const NativeCallStack& stack,
MEMFLAGS flag) {
_tracker->allocate_memory(file, offset, size, stack, flag);
MemTag mem_tag) {
_tracker->allocate_memory(file, offset, size, stack, mem_tag);
}
void MemoryFileTracker::Instance::free_memory(MemoryFile* file, size_t offset, size_t size) {
@ -181,9 +181,9 @@ const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTrack
void MemoryFileTracker::summary_snapshot(VirtualMemorySnapshot* snapshot) const {
for (int d = 0; d < _files.length(); d++) {
const MemoryFile* file = _files.at(d);
for (int i = 0; i < mt_number_of_types; i++) {
VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_flag(i));
const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_flag(i));
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_tag(i));
const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_tag(i));
// Only account the committed memory.
snap->commit_memory(current->committed());
}


@ -66,7 +66,7 @@ public:
MemoryFileTracker(bool is_detailed_mode);
void allocate_memory(MemoryFile* file, size_t offset, size_t size, const NativeCallStack& stack,
MEMFLAGS flag);
MemTag mem_tag);
void free_memory(MemoryFile* file, size_t offset, size_t size);
MemoryFile* make_file(const char* descriptive_name);
@ -96,7 +96,7 @@ public:
static void free_file(MemoryFile* device);
static void allocate_memory(MemoryFile* device, size_t offset, size_t size,
const NativeCallStack& stack, MEMFLAGS flag);
const NativeCallStack& stack, MemTag mem_tag);
static void free_memory(MemoryFile* device, size_t offset, size_t size);
static void summary_snapshot(VirtualMemorySnapshot* snapshot);


@ -27,7 +27,7 @@
#define SHARE_NMT_NATIVECALLSTACKPRINTER_HPP
#include "memory/arena.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,11 @@ STATIC_ASSERT(NMT_off > NMT_unknown);
STATIC_ASSERT(NMT_summary > NMT_off);
STATIC_ASSERT(NMT_detail > NMT_summary);
#define MEMORY_TYPE_DECLARE_NAME(type, human_readable) \
#define MEMORY_TAG_DECLARE_NAME(type, human_readable) \
{ #type, human_readable },
NMTUtil::S NMTUtil::_strings[] = {
MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME)
MEMORY_TAG_DO(MEMORY_TAG_DECLARE_NAME)
};
const char* NMTUtil::scale_name(size_t scale) {
@ -87,14 +87,14 @@ NMT_TrackingLevel NMTUtil::parse_tracking_level(const char* s) {
return NMT_unknown;
}
MEMFLAGS NMTUtil::string_to_flag(const char* s) {
for (int i = 0; i < mt_number_of_types; i ++) {
MemTag NMTUtil::string_to_mem_tag(const char* s) {
for (int i = 0; i < mt_number_of_tags; i ++) {
assert(::strlen(_strings[i].enum_s) > 2, "Sanity"); // should always start with "mt"
if (::strcasecmp(_strings[i].human_readable, s) == 0 ||
::strcasecmp(_strings[i].enum_s, s) == 0 ||
::strcasecmp(_strings[i].enum_s + 2, s) == 0) // "mtXXX" -> match also "XXX" or "xxx"
{
return (MEMFLAGS)i;
return (MemTag)i;
}
}
return mtNone;
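
The matching rule above accepts the human-readable name, the enum name, or the enum name without its leading "mt", all case-insensitively. A toy self-contained reconstruction with a three-entry table and a plain int index standing in for MemTag (assumes the POSIX strcasecmp used here):

#include <cstdio>
#include <cstring>
#include <strings.h>   // strcasecmp (POSIX)

struct TagString { const char* enum_s; const char* human_readable; };
static const TagString strings[] = {
  { "mtJavaHeap", "Java Heap" },
  { "mtClass",    "Class"     },
  { "mtThread",   "Thread"    },
};

// Returns the matching table index, or -1 (standing in for mtNone).
static int string_to_tag_index(const char* s) {
  for (int i = 0; i < (int)(sizeof(strings) / sizeof(strings[0])); i++) {
    if (strcasecmp(strings[i].human_readable, s) == 0 ||
        strcasecmp(strings[i].enum_s, s) == 0 ||
        strcasecmp(strings[i].enum_s + 2, s) == 0) {   // "mtClass" also matches "Class"/"class"
      return i;
    }
  }
  return -1;
}

int main() {
  printf("%d %d %d %d\n",
         string_to_tag_index("class"),       // 1
         string_to_tag_index("mtThread"),    // 2
         string_to_tag_index("Java Heap"),   // 0
         string_to_tag_index("bogus"));      // -1
  return 0;
}
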


@ -28,7 +28,7 @@
#define SHARE_NMT_NMTCOMMON_HPP
#include "memory/allStatic.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@ -75,37 +75,37 @@ const int NMT_TrackingStackDepth = 4;
// A few common utilities for native memory tracking
class NMTUtil : AllStatic {
public:
// Check if index is a valid MEMFLAGS enum value (including mtNone)
static inline bool flag_index_is_valid(int index) {
return index >= 0 && index < mt_number_of_types;
// Check if index is a valid MemTag enum value (including mtNone)
static inline bool tag_index_is_valid(int index) {
return index >= 0 && index < mt_number_of_tags;
}
// Check if flag value is a valid MEMFLAGS enum value (including mtNone)
static inline bool flag_is_valid(MEMFLAGS flag) {
const int index = static_cast<int>(flag);
return flag_index_is_valid(index);
// Check if tag value is a valid MemTag enum value (including mtNone)
static inline bool tag_is_valid(MemTag mem_tag) {
const int index = static_cast<int>(mem_tag);
return tag_index_is_valid(index);
}
// Map memory type to index
static inline int flag_to_index(MEMFLAGS flag) {
assert(flag_is_valid(flag), "Invalid flag (%u)", (unsigned)flag);
return static_cast<int>(flag);
// Map memory tag to index
static inline int tag_to_index(MemTag mem_tag) {
assert(tag_is_valid(mem_tag), "Invalid type (%u)", (unsigned)mem_tag);
return static_cast<int>(mem_tag);
}
// Map memory type to human readable name
static const char* flag_to_name(MEMFLAGS flag) {
return _strings[flag_to_index(flag)].human_readable;
// Map memory tag to human readable name
static const char* tag_to_name(MemTag mem_tag) {
return _strings[tag_to_index(mem_tag)].human_readable;
}
// Map memory type to literalized enum name (e.g. "mtTest")
static const char* flag_to_enum_name(MEMFLAGS flag) {
return _strings[flag_to_index(flag)].enum_s;
// Map memory tag to literalized enum name (e.g. "mtTest")
static const char* tag_to_enum_name(MemTag mem_tag) {
return _strings[tag_to_index(mem_tag)].enum_s;
}
// Map an index to memory type
static MEMFLAGS index_to_flag(int index) {
assert(flag_index_is_valid(index), "Invalid flag index (%d)", index);
return static_cast<MEMFLAGS>(index);
// Map an index to memory tag
static MemTag index_to_tag(int index) {
assert(tag_index_is_valid(index), "Invalid type index (%d)", index);
return static_cast<MemTag>(index);
}
// Memory size scale
@ -121,10 +121,10 @@ class NMTUtil : AllStatic {
// string is not a valid level.
static NMT_TrackingLevel parse_tracking_level(const char* s);
// Given a string, return associated flag. mtNone if name is invalid.
// Given a string, return associated mem_tag. mtNone if name is invalid.
// String can be either the human readable name or the
// stringified enum (with or without leading "mt". In all cases, case is ignored.
static MEMFLAGS string_to_flag(const char* name);
static MemTag string_to_mem_tag(const char* name);
// Returns textual representation of a tracking level.
static const char* tracking_level_to_string(NMT_TrackingLevel level);
@ -134,7 +134,7 @@ class NMTUtil : AllStatic {
const char* enum_s; // e.g. "mtNMT"
const char* human_readable; // e.g. "Native Memory Tracking"
};
static S _strings[mt_number_of_types];
static S _strings[mt_number_of_tags];
};


@ -1,6 +1,6 @@
/*
* Copyright (c) 2022, 2023 SAP SE. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -198,8 +198,8 @@ void NMTPreInit::create_table() {
}
// Allocate with os::malloc (hidden to prevent having to include os.hpp)
void* NMTPreInit::do_os_malloc(size_t size, MEMFLAGS memflags) {
return os::malloc(size, memflags);
void* NMTPreInit::do_os_malloc(size_t size, MemTag mem_tag) {
return os::malloc(size, mem_tag);
}
// Switches from NMT pre-init state to NMT post-init state;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2022, 2023 SAP SE. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -255,7 +255,7 @@ class NMTPreInit : public AllStatic {
}
// Just a wrapper for os::malloc to avoid including os.hpp here.
static void* do_os_malloc(size_t size, MEMFLAGS memflags);
static void* do_os_malloc(size_t size, MemTag mem_tag);
public:
@ -283,7 +283,7 @@ public:
// Called from os::realloc.
// Returns true if reallocation was handled here; in that case,
// *rc contains the return address.
static bool handle_realloc(void** rc, void* old_p, size_t new_size, MEMFLAGS memflags) {
static bool handle_realloc(void** rc, void* old_p, size_t new_size, MemTag mem_tag) {
if (old_p == nullptr) { // realloc(null, n)
return handle_malloc(rc, new_size);
}
@ -322,7 +322,7 @@ public:
// and confusing us.
const NMTPreInitAllocation* a = find_in_map(old_p);
if (a != nullptr) { // this was originally a pre-init allocation
void* p_new = do_os_malloc(new_size, memflags);
void* p_new = do_os_malloc(new_size, mem_tag);
::memcpy(p_new, a->payload, MIN2(a->size, new_size));
(*rc) = p_new;
return true;
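
The pre-init to post-init handover above is an ordinary copy-realloc across two allocators. A self-contained sketch of the pattern, with plain malloc standing in for do_os_malloc and the old size tracked explicitly:

#include <cstdlib>
#include <cstring>
#include <cstdio>

// Copy-realloc across two allocators: allocate from the new allocator,
// copy the smaller of the old and new sizes, and hand back the new block.
// The old block stays with its original owner.
static void* handover_realloc(const void* old_p, size_t old_size, size_t new_size) {
  void* p_new = malloc(new_size);            // stands in for do_os_malloc
  if (p_new != nullptr && old_p != nullptr) {
    memcpy(p_new, old_p, old_size < new_size ? old_size : new_size);
  }
  return p_new;
}

int main() {
  char old_block[8];
  memcpy(old_block, "pre-init", 8);
  char* new_block = (char*)handover_realloc(old_block, sizeof(old_block), 16);
  if (new_block != nullptr) {
    printf("%.8s\n", new_block);
    free(new_block);
  }
  return 0;
}
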

View File

@ -57,9 +57,9 @@ void NMTUsage::update_malloc_usage() {
const MallocMemorySnapshot* ms = MallocMemorySummary::as_snapshot();
size_t total_arena_size = 0;
for (int i = 0; i < mt_number_of_types; i++) {
MEMFLAGS flag = NMTUtil::index_to_flag(i);
const MallocMemory* mm = ms->by_type(flag);
for (int i = 0; i < mt_number_of_tags; i++) {
MemTag mem_tag = NMTUtil::index_to_tag(i);
const MallocMemory* mm = ms->by_type(mem_tag);
_malloc_by_type[i] = mm->malloc_size() + mm->arena_size();
total_arena_size += mm->arena_size();
}
@ -68,11 +68,11 @@ void NMTUsage::update_malloc_usage() {
_malloc_total = ms->total();
// Adjustment due to mtChunk double counting.
_malloc_by_type[NMTUtil::flag_to_index(mtChunk)] -= total_arena_size;
_malloc_by_type[NMTUtil::tag_to_index(mtChunk)] -= total_arena_size;
_malloc_total -= total_arena_size;
// Adjust mtNMT to include malloc overhead.
_malloc_by_type[NMTUtil::flag_to_index(mtNMT)] += ms->malloc_overhead();
_malloc_by_type[NMTUtil::tag_to_index(mtNMT)] += ms->malloc_overhead();
}
void NMTUsage::update_vm_usage() {
@ -81,9 +81,9 @@ void NMTUsage::update_vm_usage() {
// Reset total to allow recalculation.
_vm_total.committed = 0;
_vm_total.reserved = 0;
for (int i = 0; i < mt_number_of_types; i++) {
MEMFLAGS flag = NMTUtil::index_to_flag(i);
const VirtualMemory* vm = vms->by_type(flag);
for (int i = 0; i < mt_number_of_tags; i++) {
MemTag mem_tag = NMTUtil::index_to_tag(i);
const VirtualMemory* vm = vms->by_type(mem_tag);
_vm_by_type[i].reserved = vm->reserved();
_vm_by_type[i].committed = vm->committed();
@ -116,12 +116,12 @@ size_t NMTUsage::total_committed() const {
return _malloc_total + _vm_total.committed;
}
size_t NMTUsage::reserved(MEMFLAGS flag) const {
int index = NMTUtil::flag_to_index(flag);
size_t NMTUsage::reserved(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return _malloc_by_type[index] + _vm_by_type[index].reserved;
}
size_t NMTUsage::committed(MEMFLAGS flag) const {
int index = NMTUtil::flag_to_index(flag);
size_t NMTUsage::committed(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return _malloc_by_type[index] + _vm_by_type[index].committed;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,9 +41,9 @@ struct NMTUsageOptions {
class NMTUsage : public CHeapObj<mtNMT> {
private:
size_t _malloc_by_type[mt_number_of_types];
size_t _malloc_by_type[mt_number_of_tags];
size_t _malloc_total;
NMTUsagePair _vm_by_type[mt_number_of_types];
NMTUsagePair _vm_by_type[mt_number_of_tags];
NMTUsagePair _vm_total;
NMTUsageOptions _usage_options;
@ -61,8 +61,8 @@ public:
size_t total_reserved() const;
size_t total_committed() const;
size_t reserved(MEMFLAGS flag) const;
size_t committed(MEMFLAGS flag) const;
size_t reserved(MemTag mem_tag) const;
size_t committed(MemTag mem_tag) const;
};
#endif // SHARE_NMT_NMTUSAGE_HPP
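
These sentinel-sized arrays are the core layout of summary accounting: one slot per tag, indexed by the enum value. A self-contained sketch of the pattern, with a trimmed tag list and the bookkeeping reduced to a single counter per tag:

#include <cstdint>
#include <cstddef>
#include <cstdio>

enum class MemTag : uint8_t { mtJavaHeap, mtClass, mtThread, mtNMT, mt_number_of_tags };
constexpr int mt_number_of_tags = static_cast<int>(MemTag::mt_number_of_tags);

static size_t malloc_by_tag[mt_number_of_tags];       // one slot per tag, sized by the sentinel

static void record_malloc(MemTag mem_tag, size_t bytes) {
  malloc_by_tag[static_cast<int>(mem_tag)] += bytes;   // tag_to_index is just a cast
}

int main() {
  record_malloc(MemTag::mtClass, 4096);
  record_malloc(MemTag::mtNMT, 128);
  for (int i = 0; i < mt_number_of_tags; i++) {        // index_to_tag is the inverse cast
    printf("tag %d: %zu bytes\n", i, malloc_by_tag[i]);
  }
  return 0;
}
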


@ -142,7 +142,7 @@ bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const
// At this point the previous overlapping regions have been
// cleared, and the full region is guaranteed to be inserted.
VirtualMemorySummary::record_committed_memory(size, flag());
VirtualMemorySummary::record_committed_memory(size, mem_tag());
// Try to merge with prev and possibly next.
if (try_merge_with(prev, addr, size, stack)) {
@ -212,14 +212,14 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
crgn = head->data();
if (crgn->same_region(addr, sz)) {
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
_committed_regions.remove_after(prev);
return true;
}
// del_rgn contains crgn
if (del_rgn.contain_region(crgn->base(), crgn->size())) {
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
head = head->next();
_committed_regions.remove_after(prev);
continue; // don't update head or prev
@ -230,20 +230,20 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
// (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
if (crgn->contain_address(end - 1)) {
VirtualMemorySummary::record_uncommitted_memory(sz, flag());
VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag());
return remove_uncommitted_region(head, addr, sz); // done!
} else {
// (2) Did not find del_rgn's end in crgn.
size_t size = crgn->end() - del_rgn.base();
crgn->exclude_region(addr, size);
VirtualMemorySummary::record_uncommitted_memory(size, flag());
VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
}
} else if (crgn->contain_address(end - 1)) {
// Found del_rgn's end, but not its base addr.
size_t size = del_rgn.end() - crgn->base();
crgn->exclude_region(crgn->base(), size);
VirtualMemorySummary::record_uncommitted_memory(size, flag());
VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
return true; // should be done if the list is sorted properly!
}
@ -292,19 +292,19 @@ size_t ReservedMemoryRegion::committed_size() const {
return committed;
}
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
assert((flag() == mtNone || flag() == f),
"Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
if (flag() != f) {
VirtualMemorySummary::move_reserved_memory(flag(), f, size());
VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
_flag = f;
void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) {
assert((mem_tag() == mtNone || mem_tag() == new_mem_tag),
"Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag);
if (mem_tag() != new_mem_tag) {
VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size());
VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size());
_mem_tag = new_mem_tag;
}
}
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
assert(flag() == mtThreadStack, "Only for thread stack");
assert(mem_tag() == mtThreadStack, "Only for thread stack");
LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
address bottom = base();
address top = base() + size();
@ -334,26 +334,26 @@ bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
}
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag) {
const NativeCallStack& stack, MemTag mem_tag) {
assert(base_addr != nullptr, "Invalid address");
assert(size > 0, "Invalid size");
assert(_reserved_regions != nullptr, "Sanity check");
ReservedMemoryRegion rgn(base_addr, size, stack, flag);
ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag);
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
rgn.flag_name(), p2i(rgn.base()), rgn.size());
rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
if (reserved_rgn == nullptr) {
VirtualMemorySummary::record_reserved_memory(size, flag);
VirtualMemorySummary::record_reserved_memory(size, mem_tag);
return _reserved_regions->add(rgn) != nullptr;
} else {
// Deal with recursive reservation
// os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
// See JDK-8198226.
if (reserved_rgn->same_region(base_addr, size) &&
(reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
(reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) {
reserved_rgn->set_call_stack(stack);
reserved_rgn->set_flag(flag);
reserved_rgn->set_mem_tag(mem_tag);
return true;
} else {
assert(reserved_rgn->overlap_region(base_addr, size), "Must be");
@ -362,16 +362,16 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
// It can happen when the regions are thread stacks, as JNI
// thread does not detach from VM before exits, and leads to
// leak JavaThread object
if (reserved_rgn->flag() == mtThreadStack) {
if (reserved_rgn->mem_tag() == mtThreadStack) {
guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
// Overwrite with new region
// Release old region
VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag());
VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag());
// Add new region
VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag);
*reserved_rgn = rgn;
return true;
@ -380,27 +380,27 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
// CDS mapping region.
// CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
// NMT reports CDS as a whole.
if (reserved_rgn->flag() == mtClassShared) {
if (reserved_rgn->mem_tag() == mtClassShared) {
log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
return true;
}
// Mapped CDS string region.
// The string region(s) is part of the java heap.
if (reserved_rgn->flag() == mtJavaHeap) {
if (reserved_rgn->mem_tag() == mtJavaHeap) {
log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
return true;
}
// Print some more details. Don't use UL here to avoid circularities.
tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
" new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.\n"
" new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.",
p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->mem_tag(),
p2i(base_addr), p2i(base_addr + size), (unsigned)mem_tag);
if (MemTracker::tracking_level() == NMT_detail) {
tty->print_cr("Existing region allocated from:");
reserved_rgn->call_stack()->print_on(tty);
@ -413,7 +413,7 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
}
}
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) {
assert(addr != nullptr, "Invalid address");
assert(_reserved_regions != nullptr, "Sanity check");
@ -421,10 +421,10 @@ void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag)
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
if (reserved_rgn != nullptr) {
assert(reserved_rgn->contain_address(addr), "Containment");
if (reserved_rgn->flag() != flag) {
assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
NMTUtil::flag_to_name(reserved_rgn->flag()));
reserved_rgn->set_flag(flag);
if (reserved_rgn->mem_tag() != mem_tag) {
assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")",
NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
reserved_rgn->set_mem_tag(mem_tag);
}
}
}
@ -440,13 +440,13 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
if (reserved_rgn == nullptr) {
log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
rgn.flag_name(), p2i(rgn.base()), rgn.size());
rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
}
assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
bool result = reserved_rgn->add_committed_region(addr, size, stack);
log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
return result;
}
@ -459,10 +459,10 @@ bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size)
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
const char* flag_name = reserved_rgn->flag_name(); // after remove, info is not complete
const char* type_name = reserved_rgn->mem_tag_name(); // after remove, info is not complete
bool result = reserved_rgn->remove_uncommitted_region(addr, size);
log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
flag_name, p2i(addr), size, (result ? " Succeeded" : "Failed"));
type_name, p2i(addr), size, (result ? " Succeeded" : "Failed"));
return result;
}
@ -474,15 +474,15 @@ bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
ReservedMemoryRegion backup(*rgn);
bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
if (!result) {
return false;
}
VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag());
result = _reserved_regions->remove(*rgn);
log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s" ,
backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
return result;
}
@ -508,7 +508,7 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
return false;
}
if (reserved_rgn->flag() == mtClassShared) {
if (reserved_rgn->mem_tag() == mtClassShared) {
if (reserved_rgn->contain_region(addr, size)) {
// This is an unmapped CDS region, which is part of the reserved shared
// memory region.
@ -523,14 +523,14 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
(size - reserved_rgn->size()));
ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
assert(cls_rgn != nullptr, "Class space region not recorded?");
assert(cls_rgn->flag() == mtClass, "Must be class type");
assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag");
remove_released_region(reserved_rgn);
remove_released_region(cls_rgn);
return true;
}
}
VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag());
assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
if (reserved_rgn->base() == addr ||
@ -541,7 +541,7 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
address top = reserved_rgn->end();
address high_base = addr + size;
ReservedMemoryRegion high_rgn(high_base, top - high_base,
*reserved_rgn->call_stack(), reserved_rgn->flag());
*reserved_rgn->call_stack(), reserved_rgn->mem_tag());
// use original region for lower region
reserved_rgn->exclude_region(addr, top - addr);
@ -557,8 +557,8 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory flags of the original section.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {
// stack and the memory tags of the original section.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {
ReservedMemoryRegion rgn(addr, size);
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
@ -567,15 +567,15 @@ bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size
assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");
NativeCallStack original_stack = *reserved_rgn->call_stack();
MEMFLAGS original_flags = reserved_rgn->flag();
MemTag original_tag = reserved_rgn->mem_tag();
const char* name = reserved_rgn->flag_name();
const char* name = reserved_rgn->mem_tag_name();
remove_released_region(reserved_rgn);
log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
name, p2i(rgn.base()), rgn.size(), split);
// Now, create two new regions.
add_reserved_region(addr, split, original_stack, flag);
add_reserved_region(addr + split, size - split, original_stack, split_flag);
add_reserved_region(addr, split, original_stack, mem_tag);
add_reserved_region(addr + split, size - split, original_stack, split_tag);
return true;
}
@ -621,7 +621,7 @@ public:
SnapshotThreadStackWalker() {}
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->flag() == mtThreadStack) {
if (rgn->mem_tag() == mtThreadStack) {
address stack_bottom = rgn->thread_stack_uncommitted_bottom();
address committed_start;
size_t committed_size;
@ -688,7 +688,7 @@ public:
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->contain_address(_p)) {
_st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag()));
p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag()));
if (MemTracker::tracking_level() == NMT_detail) {
_stackprinter.print_stack(rgn->call_stack());
_st->cr();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,8 +77,8 @@ class VirtualMemory {
class VirtualMemoryAllocationSite : public AllocationSite {
VirtualMemory _c;
public:
VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) :
AllocationSite(stack, flag) { }
VirtualMemoryAllocationSite(const NativeCallStack& stack, MemTag mem_tag) :
AllocationSite(stack, mem_tag) { }
inline void reserve_memory(size_t sz) { _c.reserve_memory(sz); }
inline void commit_memory (size_t sz) { _c.commit_memory(sz); }
@ -95,22 +95,22 @@ class VirtualMemorySnapshot : public ResourceObj {
friend class VirtualMemorySummary;
private:
VirtualMemory _virtual_memory[mt_number_of_types];
VirtualMemory _virtual_memory[mt_number_of_tags];
public:
inline VirtualMemory* by_type(MEMFLAGS flag) {
int index = NMTUtil::flag_to_index(flag);
inline VirtualMemory* by_type(MemTag mem_tag) {
int index = NMTUtil::tag_to_index(mem_tag);
return &_virtual_memory[index];
}
inline const VirtualMemory* by_type(MEMFLAGS flag) const {
int index = NMTUtil::flag_to_index(flag);
inline const VirtualMemory* by_type(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return &_virtual_memory[index];
}
inline size_t total_reserved() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
for (int index = 0; index < mt_number_of_tags; index ++) {
amount += _virtual_memory[index].reserved();
}
return amount;
@ -118,14 +118,14 @@ class VirtualMemorySnapshot : public ResourceObj {
inline size_t total_committed() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
for (int index = 0; index < mt_number_of_tags; index ++) {
amount += _virtual_memory[index].committed();
}
return amount;
}
void copy_to(VirtualMemorySnapshot* s) {
for (int index = 0; index < mt_number_of_types; index ++) {
for (int index = 0; index < mt_number_of_tags; index ++) {
s->_virtual_memory[index] = _virtual_memory[index];
}
}
@ -134,32 +134,32 @@ class VirtualMemorySnapshot : public ResourceObj {
class VirtualMemorySummary : AllStatic {
public:
static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->reserve_memory(size);
static inline void record_reserved_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->reserve_memory(size);
}
static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->commit_memory(size);
static inline void record_committed_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->commit_memory(size);
}
static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->uncommit_memory(size);
static inline void record_uncommitted_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->uncommit_memory(size);
}
static inline void record_released_memory(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->release_memory(size);
static inline void record_released_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->release_memory(size);
}
// Move virtual memory from one memory type to another.
// Virtual memory can be reserved before it is associated with a memory type, and tagged
// Move virtual memory from one memory tag to another.
// Virtual memory can be reserved before it is associated with a memory tag, and tagged
// as 'unknown'. Once the memory is tagged, the virtual memory will be moved from 'unknown'
// type to specified memory type.
static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
// type to specified memory tag.
static inline void move_reserved_memory(MemTag from, MemTag to, size_t size) {
as_snapshot()->by_type(from)->release_memory(size);
as_snapshot()->by_type(to)->reserve_memory(size);
}
static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
static inline void move_committed_memory(MemTag from, MemTag to, size_t size) {
as_snapshot()->by_type(from)->uncommit_memory(size);
as_snapshot()->by_type(to)->commit_memory(size);
}
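Illustrative sketch (not from the patch above): how the renamed summary API could be used when a reservation first recorded under mtNone is later attributed to a concrete tag; the size and the mtGC tag are hypothetical.

static void retag_reserved_summary_example(size_t bytes) {
  // The reservation is recorded before its purpose is known, so it lands under mtNone.
  VirtualMemorySummary::record_reserved_memory(bytes, mtNone);
  // Once the owner is known, the accounting moves from mtNone to the real tag.
  VirtualMemorySummary::move_reserved_memory(mtNone, mtGC, bytes);
}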
@ -293,16 +293,16 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
_committed_regions;
NativeCallStack _stack;
MEMFLAGS _flag;
MemTag _mem_tag;
public:
ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) :
VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { }
MemTag mem_tag = mtNone) :
VirtualMemoryRegion(base, size), _stack(stack), _mem_tag(mem_tag) { }
ReservedMemoryRegion(address base, size_t size) :
VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone) { }
VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _mem_tag(mtNone) { }
// Copy constructor
ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
@ -313,8 +313,8 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
inline const NativeCallStack* call_stack() const { return &_stack; }
void set_flag(MEMFLAGS flag);
inline MEMFLAGS flag() const { return _flag; }
void set_mem_tag(MemTag mem_tag);
inline MemTag mem_tag() const { return _mem_tag; }
// uncommitted thread stack bottom, above guard pages if there is any.
address thread_stack_uncommitted_bottom() const;
@ -336,8 +336,8 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
set_base(other.base());
set_size(other.size());
_stack = *other.call_stack();
_flag = other.flag();
_stack = *other.call_stack();
_mem_tag = other.mem_tag();
_committed_regions.clear();
CommittedRegionIterator itr = other.iterate_committed_regions();
@ -350,7 +350,7 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
return *this;
}
const char* flag_name() const { return NMTUtil::flag_to_name(_flag); }
const char* mem_tag_name() const { return NMTUtil::tag_to_name(_mem_tag); }
private:
// The committed region contains the uncommitted region, subtract the uncommitted
@ -380,18 +380,18 @@ class VirtualMemoryTracker : AllStatic {
public:
static bool initialize(NMT_TrackingLevel level);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag = mtNone);
static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static bool remove_uncommitted_region (address base_addr, size_t size);
static bool remove_released_region (address base_addr, size_t size);
static bool remove_released_region (ReservedMemoryRegion* rgn);
static void set_reserved_region_type (address addr, MEMFLAGS flag);
static void set_reserved_region_type (address addr, MemTag mem_tag);
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory flags of the original section.
static bool split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag);
// stack and the memory tag of the original section.
static bool split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_type);
// Walk virtual memory data structure for creating baseline, etc.
static bool walk_virtual_memory(VirtualMemoryWalker* walker);
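Illustrative sketch (not from the patch above): typical tracker calls after the rename; note that set_reserved_region_type keeps its old name here while now taking a MemTag. The base address, size and the mtClassShared tag are hypothetical.

static void track_and_tag_example(address base, size_t size) {
  // Register the reservation first; it may start out untagged.
  VirtualMemoryTracker::add_reserved_region(base, size, CALLER_PC, mtNone);
  // Later, associate the region with its real owner.
  VirtualMemoryTracker::set_reserved_region_type(base, mtClassShared);
}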

View File

@ -83,8 +83,8 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
} else {
// If the state is not matching then we have different operations, such as:
// reserve [x1, A); ... commit [A, x2); or
// reserve [x1, A), flag1; ... reserve [A, x2), flag2; or
// reserve [A, x1), flag1; ... reserve [A, x2), flag2;
// reserve [x1, A), mem_tag1; ... reserve [A, x2), mem_tag2; or
// reserve [A, x1), mem_tag1; ... reserve [A, x2), mem_tag2;
// then we re-use the existing out node, overwriting its old metadata.
leqA_n->val() = stA;
}
@ -147,7 +147,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
if (to_be_deleted_inbetween_a_b.length() == 0 && LEQ_A_found) {
// We must have smashed a hole in an existing region (or replaced it entirely).
// LEQ_A < A < B <= C
SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(LEQ_A.out().flag())];
SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(LEQ_A.out().mem_tag())];
if (LEQ_A.out().type() == StateType::Reserved) {
rescom.reserve -= B - A;
} else if (LEQ_A.out().type() == StateType::Committed) {
@ -163,7 +163,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
_tree.remove(delete_me.address);
// Perform summary accounting
SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(delete_me.in().flag())];
SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(delete_me.in().mem_tag())];
if (delete_me.in().type() == StateType::Reserved) {
rescom.reserve -= delete_me.address - prev.address;
} else if (delete_me.in().type() == StateType::Committed) {
@ -178,17 +178,17 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
// A - prev - B - (some node >= B)
// It might be that prev.address == B == (some node >= B), this is fine.
if (prev.out().type() == StateType::Reserved) {
SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(prev.out().mem_tag())];
rescom.reserve -= B - prev.address;
} else if (prev.out().type() == StateType::Committed) {
SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(prev.out().mem_tag())];
rescom.commit -= B - prev.address;
rescom.reserve -= B - prev.address;
}
}
// Finally, we can register the new region [A, B)'s summary data.
SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(metadata.flag)];
SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(metadata.mem_tag)];
if (state == StateType::Reserved) {
rescom.reserve += B - A;
} else if (state == StateType::Committed) {

View File

@ -35,7 +35,7 @@
// A VMATree stores a sequence of points on the natural number line.
// Each of these points stores information about a state change.
// For example, the state may go from released memory to committed memory,
// or from committed memory of a certain MEMFLAGS to committed memory of a different MEMFLAGS.
// or from committed memory of a certain MemTag to committed memory of a different MemTag.
// The set of points is stored in a balanced binary tree for efficient querying and updating.
class VMATree {
friend class NMTVMATreeTest;
@ -69,15 +69,15 @@ public:
// Each point has some stack and a flag associated with it.
struct RegionData {
const NativeCallStackStorage::StackIndex stack_idx;
const MEMFLAGS flag;
const MemTag mem_tag;
RegionData() : stack_idx(), flag(mtNone) {}
RegionData() : stack_idx(), mem_tag(mtNone) {}
RegionData(NativeCallStackStorage::StackIndex stack_idx, MEMFLAGS flag)
: stack_idx(stack_idx), flag(flag) {}
RegionData(NativeCallStackStorage::StackIndex stack_idx, MemTag mem_tag)
: stack_idx(stack_idx), mem_tag(mem_tag) {}
static bool equals(const RegionData& a, const RegionData& b) {
return a.flag == b.flag &&
return a.mem_tag == b.mem_tag &&
NativeCallStackStorage::equals(a.stack_idx, b.stack_idx);
}
};
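Illustrative sketch (not from the patch above): per the definition above, two points' metadata compare equal only if both the stack index and the tag match; the tags used here are hypothetical.

static void regiondata_equals_example(NativeCallStackStorage::StackIndex si) {
  VMATree::RegionData gc_data(si, mtGC);
  VMATree::RegionData thread_data(si, mtThreadStack);
  assert(!VMATree::RegionData::equals(gc_data, thread_data),
         "same stack index but different tags must not compare equal");
}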
@ -87,16 +87,16 @@ public:
private:
struct IntervalState {
private:
// Store the type and flag as two bytes
// Store the type and mem_tag as two bytes
uint8_t type_flag[2];
NativeCallStackStorage::StackIndex sidx;
public:
IntervalState() : type_flag{0,0}, sidx() {}
IntervalState(const StateType type, const RegionData data) {
assert(!(type == StateType::Released) || data.flag == mtNone, "Released type must have flag mtNone");
assert(!(type == StateType::Released) || data.mem_tag == mtNone, "Released type must have memory tag mtNone");
type_flag[0] = static_cast<uint8_t>(type);
type_flag[1] = static_cast<uint8_t>(data.flag);
type_flag[1] = static_cast<uint8_t>(data.mem_tag);
sidx = data.stack_idx;
}
@ -104,12 +104,12 @@ private:
return static_cast<StateType>(type_flag[0]);
}
MEMFLAGS flag() const {
return static_cast<MEMFLAGS>(type_flag[1]);
MemTag mem_tag() const {
return static_cast<MemTag>(type_flag[1]);
}
RegionData regiondata() const {
return RegionData{sidx, flag()};
return RegionData{sidx, mem_tag()};
}
NativeCallStackStorage::StackIndex stack() const {
@ -159,10 +159,10 @@ public:
delta commit;
};
struct SummaryDiff {
SingleDiff flag[mt_number_of_types];
SingleDiff tag[mt_number_of_tags];
SummaryDiff() {
for (int i = 0; i < mt_number_of_types; i++) {
flag[i] = SingleDiff{0, 0};
for (int i = 0; i < mt_number_of_tags; i++) {
tag[i] = SingleDiff{0, 0};
}
}
};
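Illustrative sketch (not from the patch above): reading one tag's deltas out of the SummaryDiff returned by register_mapping, indexed exactly as the struct above lays it out; the output stream is hypothetical.

static void report_tag_delta(const VMATree::SummaryDiff& diff, MemTag mem_tag, outputStream* st) {
  const VMATree::SingleDiff& d = diff.tag[NMTUtil::tag_to_index(mem_tag)];
  st->print_cr("%s: reserve delta " INT64_FORMAT ", commit delta " INT64_FORMAT,
               NMTUtil::tag_to_name(mem_tag), (int64_t)d.reserve, (int64_t)d.commit);
}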

View File

@ -2409,7 +2409,7 @@ static char* get_bad_address() {
if (bad_address != nullptr) {
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
/*is_committed*/false);
MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
MemTracker::record_virtual_memory_tag((void*)bad_address, mtInternal);
}
}
return bad_address;

View File

@ -25,7 +25,7 @@
#ifndef SHARE_PRIMS_JVMTIAGENTLIST_HPP
#define SHARE_PRIMS_JVMTIAGENTLIST_HPP
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "prims/jvmtiAgent.hpp"
#include "utilities/growableArray.hpp"

View File

@ -676,7 +676,7 @@ WB_END
#endif // INCLUDE_G1GC
// Alloc memory using the test memory type so that we can use that to see if
// Alloc memory using the test memory tag so that we can use that to see if
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
@ -692,11 +692,11 @@ WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size,
return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
WB_END
// Alloc memory with pseudo call stack and specific memory type.
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStackAndType(JNIEnv* env, jobject o, jlong size, jint pseudo_stack, jint type))
// Alloc memory with pseudo call stack and specific memory tag.
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStackAndType(JNIEnv* env, jobject o, jlong size, jint pseudo_stack, jint mem_tag))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
return (jlong)(uintptr_t)os::malloc(size, (MEMFLAGS)type, stack);
return (jlong)(uintptr_t)os::malloc(size, (MemTag)mem_tag, stack);
WB_END
// Free the memory allocated by NMTAllocTest
@ -708,21 +708,21 @@ WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
addr = (jlong)(uintptr_t)os::reserve_memory(size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);
MemTracker::record_virtual_memory_tag((address)addr, mtTest);
return addr;
WB_END
WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);
MemTracker::record_virtual_memory_tag((address)addr, mtTest);
return addr;
WB_END
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
MemTracker::record_virtual_memory_tag((address)(uintptr_t)addr, mtTest);
WB_END
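Illustrative sketch (not from the patch above): the lifecycle the whitebox entries above exercise — reserve, tag, commit, then tear down. The uncommit/release calls are assumed from the surrounding os API; error handling is omitted.

static void nmt_test_lifecycle_example(size_t size) {
  char* base = os::reserve_memory(size);                       // untagged (mtNone) at first
  MemTracker::record_virtual_memory_tag((address)base, mtTest); // attribute it to the test tag
  os::commit_memory(base, size, !ExecMem);
  os::uncommit_memory(base, size);
  os::release_memory(base, size);
}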
WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))

View File

@ -187,7 +187,7 @@ class HandleArea: public Arena {
HandleArea* _prev; // link to outer (older) area
public:
// Constructor
HandleArea(MEMFLAGS flags, HandleArea* prev) : Arena(flags, Tag::tag_ha, Chunk::tiny_size) {
HandleArea(MemTag mem_tag, HandleArea* prev) : Arena(mem_tag, Tag::tag_ha, Chunk::tiny_size) {
debug_only(_handle_mark_nesting = 0);
debug_only(_no_handle_mark_nesting = 0);
_prev = prev;

View File

@ -409,8 +409,8 @@ void JavaThread::check_for_valid_safepoint_state() {
// A JavaThread is a normal Java thread
JavaThread::JavaThread(MEMFLAGS flags) :
Thread(flags),
JavaThread::JavaThread(MemTag mem_tag) :
Thread(mem_tag),
// Initialize fields
_on_thread_list(false),
DEBUG_ONLY(_java_call_counter(0) COMMA)
@ -634,7 +634,7 @@ void JavaThread::block_if_vm_exited() {
}
}
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz, MEMFLAGS flags) : JavaThread(flags) {
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz, MemTag mem_tag) : JavaThread(mem_tag) {
set_entry_point(entry_point);
// Create the native thread itself.
// %note runtime_23

View File

@ -479,8 +479,8 @@ private:
public:
// Constructor
JavaThread(MEMFLAGS flags = mtThread); // delegating constructor
JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MEMFLAGS flags = mtThread);
JavaThread(MemTag mem_tag = mtThread); // delegating constructor
JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
~JavaThread();
// Factory method to create a new JavaThread whose attach state is "is attaching"
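Illustrative sketch (not from the patch above): both constructors above default the tag to mtThread, so existing call sites compile unchanged; the entry point and stack size are hypothetical.

static JavaThread* spawn_example(ThreadFunction entry, size_t stack_size) {
  // Equivalent to new JavaThread(entry, stack_size, mtThread) thanks to the default argument.
  return new JavaThread(entry, stack_size);
}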

View File

@ -29,7 +29,7 @@
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
@ -60,14 +60,14 @@ class ObjectMonitorTable : AllStatic {
}
static void* allocate_node(void* context, size_t size, Value const& value) {
ObjectMonitorTable::inc_items_count();
return AllocateHeap(size, MEMFLAGS::mtObjectMonitor);
return AllocateHeap(size, mtObjectMonitor);
};
static void free_node(void* context, void* memory, Value const& value) {
ObjectMonitorTable::dec_items_count();
FreeHeap(memory);
}
};
using ConcurrentTable = ConcurrentHashTable<Config, MEMFLAGS::mtObjectMonitor>;
using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
static ConcurrentTable* _table;
static volatile size_t _items_count;

View File

@ -600,16 +600,16 @@ bool os::find_builtin_agent(JvmtiAgent* agent, const char *syms[],
// --------------------- heap allocation utilities ---------------------
char *os::strdup(const char *str, MEMFLAGS flags) {
char *os::strdup(const char *str, MemTag mem_tag) {
size_t size = strlen(str);
char *dup_str = (char *)malloc(size + 1, flags);
char *dup_str = (char *)malloc(size + 1, mem_tag);
if (dup_str == nullptr) return nullptr;
strcpy(dup_str, str);
return dup_str;
}
char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
char* p = os::strdup(str, flags);
char* os::strdup_check_oom(const char* str, MemTag mem_tag) {
char* p = os::strdup(str, mem_tag);
if (p == nullptr) {
vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
}
@ -629,11 +629,11 @@ static void break_if_ptr_caught(void* ptr) {
}
#endif // ASSERT
void* os::malloc(size_t size, MEMFLAGS flags) {
return os::malloc(size, flags, CALLER_PC);
void* os::malloc(size_t size, MemTag mem_tag) {
return os::malloc(size, mem_tag, CALLER_PC);
}
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
void* os::malloc(size_t size, MemTag mem_tag, const NativeCallStack& stack) {
// Special handling for NMT preinit phase before arguments are parsed
void* rc = nullptr;
@ -651,7 +651,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
size = MAX2((size_t)1, size);
// Observe MallocLimit
if (MemTracker::check_exceeds_limit(size, memflags)) {
if (MemTracker::check_exceeds_limit(size, mem_tag)) {
return nullptr;
}
@ -667,7 +667,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
return nullptr;
}
void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);
void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, mem_tag, stack);
if (CDSConfig::is_dumping_static_archive()) {
// Need to deterministically fill all the alignment gaps in C++ structures.
@ -679,20 +679,20 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
return inner_ptr;
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
return os::realloc(memblock, size, flags, CALLER_PC);
void* os::realloc(void *memblock, size_t size, MemTag mem_tag) {
return os::realloc(memblock, size, mem_tag, CALLER_PC);
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
void* os::realloc(void *memblock, size_t size, MemTag mem_tag, const NativeCallStack& stack) {
// Special handling for NMT preinit phase before arguments are parsed
void* rc = nullptr;
if (NMTPreInit::handle_realloc(&rc, memblock, size, memflags)) {
if (NMTPreInit::handle_realloc(&rc, memblock, size, mem_tag)) {
return rc;
}
if (memblock == nullptr) {
return os::malloc(size, memflags, stack);
return os::malloc(size, mem_tag, stack);
}
DEBUG_ONLY(check_crash_protection());
@ -715,15 +715,15 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
const size_t old_size = MallocTracker::malloc_header(memblock)->size();
// Observe MallocLimit
if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, memflags)) {
if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, mem_tag)) {
return nullptr;
}
// Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it
// may invalidate the old block, including its header.
MallocHeader* header = MallocHeader::resolve_checked(memblock);
assert(memflags == header->flags(), "weird NMT flags mismatch (new:\"%s\" != old:\"%s\")\n",
NMTUtil::flag_to_name(memflags), NMTUtil::flag_to_name(header->flags()));
assert(mem_tag == header->mem_tag(), "weird NMT type mismatch (new:\"%s\" != old:\"%s\")\n",
NMTUtil::tag_to_name(mem_tag), NMTUtil::tag_to_name(header->mem_tag()));
const MallocHeader::FreeInfo free_info = header->free_info();
header->mark_block_as_dead();
@ -742,7 +742,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
// After a successful realloc(3), we account the resized block with its new size
// to NMT.
void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack);
void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, mem_tag, stack);
#ifdef ASSERT
assert(old_size == free_info.size, "Sanity");
@ -1871,10 +1871,10 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
return os::pd_create_stack_guard_pages(addr, bytes);
}
char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
char* os::reserve_memory(size_t bytes, bool executable, MemTag mem_tag) {
char* result = pd_reserve_memory(bytes, executable);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags);
MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, mem_tag);
log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes));
} else {
log_info(os, map)("Reserve failed (%zu bytes)", bytes);
@ -1882,10 +1882,10 @@ char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
return result;
}
char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MEMFLAGS flag) {
char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MemTag mem_tag) {
char* result = SimulateFullAddressSpace ? nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, flag);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, mem_tag);
log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes));
} else {
log_info(os, map)("Attempt to reserve " RANGEFMT " failed",
@ -2235,31 +2235,31 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) {
}
}
char* os::map_memory_to_file(size_t bytes, int file_desc, MEMFLAGS flag) {
char* os::map_memory_to_file(size_t bytes, int file_desc, MemTag mem_tag) {
// Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
// but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
// On all current implementations null is interpreted as any available address.
char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC, flag);
MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC, mem_tag);
}
return result;
}
char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MEMFLAGS flag) {
char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MemTag mem_tag) {
char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flag);
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, mem_tag);
}
return result;
}
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec, MEMFLAGS flags) {
bool allow_exec, MemTag mem_tag) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, mem_tag);
}
return result;
}
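Illustrative sketch (not from the patch above): the heap-allocation helpers above now take a MemTag; note that realloc must see the same tag the block was allocated with, per the assert in os::realloc. The names, sizes and tags here are hypothetical.

static char* tagged_alloc_example(const char* name) {
  char* copy = os::strdup_check_oom(name, mtArguments);   // exits the VM if the copy cannot be allocated
  void* buf = os::malloc(128, mtInternal);                // may return nullptr, e.g. when a MallocLimit is hit
  if (buf != nullptr) {
    void* resized = os::realloc(buf, 256, mtInternal);    // re-accounted to NMT under the same tag
    os::free(resized != nullptr ? resized : buf);         // free whichever pointer is live
  }
  return copy;
}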

View File

@ -450,14 +450,14 @@ class os: AllStatic {
inline static size_t cds_core_region_alignment();
// Reserves virtual memory.
static char* reserve_memory(size_t bytes, bool executable = false, MEMFLAGS flags = mtNone);
static char* reserve_memory(size_t bytes, bool executable = false, MemTag mem_tag = mtNone);
// Reserves virtual memory that starts at an address that is aligned to 'alignment'.
static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false);
// Attempts to reserve the virtual memory at [addr, addr + bytes).
// Does not overwrite existing mappings.
static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MEMFLAGS flag = mtNone);
static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MemTag mem_tag = mtNone);
// Given an address range [min, max), attempts to reserve memory within this area, with the given alignment.
// If randomize is true, the location will be randomized.
@ -509,16 +509,16 @@ class os: AllStatic {
static int create_file_for_heap(const char* dir);
// Map memory to the file referred by fd. This function is slightly different from map_memory()
// and is added to be used for implementation of -XX:AllocateHeapAt
static char* map_memory_to_file(size_t size, int fd, MEMFLAGS flag = mtNone);
static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag = mtNone);
static char* map_memory_to_file(size_t size, int fd, MemTag mem_tag = mtNone);
static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag = mtNone);
static char* map_memory_to_file(char* base, size_t size, int fd);
static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MEMFLAGS flag = mtNone);
static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MemTag mem_tag = mtNone);
// Replace existing reserved memory with file mapping
static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only = false,
bool allow_exec = false, MEMFLAGS flags = mtNone);
bool allow_exec = false, MemTag mem_tag = mtNone);
static bool unmap_memory(char *addr, size_t bytes);
static void disclaim_memory(char *addr, size_t bytes);
static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
@ -900,16 +900,16 @@ class os: AllStatic {
static int get_native_stack(address* stack, int size, int toSkip = 0);
// General allocation (must be MT-safe)
static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
static void* malloc (size_t size, MEMFLAGS flags);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
static void* malloc (size_t size, MemTag mem_tag, const NativeCallStack& stack);
static void* malloc (size_t size, MemTag mem_tag);
static void* realloc (void *memblock, size_t size, MemTag mem_tag, const NativeCallStack& stack);
static void* realloc (void *memblock, size_t size, MemTag mem_tag);
// handles null pointers
static void free (void *memblock);
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
static char* strdup(const char *, MemTag mem_tag = mtInternal); // Like strdup
// Like strdup, but exit VM when strdup() returns null
static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
static char* strdup_check_oom(const char*, MemTag mem_tag = mtInternal);
// SocketInterface (ex HPI SocketInterface )
static int socket_close(int fd);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ void SafepointMechanism::default_initialize() {
const size_t allocation_size = 2 * page_size;
char* polling_page = os::reserve_memory(allocation_size);
os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
MemTracker::record_virtual_memory_type((address)polling_page, mtSafepoint);
MemTracker::record_virtual_memory_tag((address)polling_page, mtSafepoint);
char* bad_page = polling_page;
char* good_page = polling_page + page_size;

View File

@ -64,7 +64,7 @@ THREAD_LOCAL Thread* Thread::_thr_current = nullptr;
DEBUG_ONLY(Thread* Thread::_starting_thread = nullptr;)
Thread::Thread(MEMFLAGS flags) {
Thread::Thread(MemTag mem_tag) {
DEBUG_ONLY(_run_state = PRE_CALL_RUN;)
@ -78,9 +78,9 @@ Thread::Thread(MEMFLAGS flags) {
// allocated data structures
set_osthread(nullptr);
set_resource_area(new (flags) ResourceArea(flags));
set_resource_area(new (mem_tag) ResourceArea(mem_tag));
DEBUG_ONLY(_current_resource_mark = nullptr;)
set_handle_area(new (flags) HandleArea(flags, nullptr));
set_handle_area(new (mem_tag) HandleArea(mem_tag, nullptr));
set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass));
set_last_handle_mark(nullptr);
DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr);

View File

@ -277,7 +277,7 @@ class Thread: public ThreadShadow {
// is waiting to lock
public:
// Constructor
Thread(MEMFLAGS flag = mtThread);
Thread(MemTag mem_tag = mtThread);
virtual ~Thread() = 0; // Thread is abstract.
// Manage Thread::current()

View File

@ -32,7 +32,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memflags.hpp"
#include "nmt/memTag.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
Some files were not shown because too many files have changed in this diff.