8331540: [BACKOUT] NMT: add/make a mandatory MEMFLAGS argument to family of os::reserve/commit/uncommit memory API
Reviewed-by: jwilhelm
parent a10845b553
commit f665e07ab2
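The change being backed out (JDK-8331540) had made the NMT memory-type argument mandatory across the os:: virtual-memory API. As a rough sketch (assumed signatures; the defaulted form follows the pre-8331540 style visible in the hunks below, e.g. "MEMFLAGS flag = mtNone"):

    // With JDK-8331540 applied (removed by this backout):
    char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags);
    bool  os::commit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flags);

    // After this backout, the MEMFLAGS parameter is dropped or defaulted again:
    char* os::reserve_memory(size_t bytes, bool executable = false);
    bool  os::commit_memory(char* addr, size_t bytes, bool executable);

Accordingly, in the hunks below the "-" lines carry the mandatory MEMFLAGS/mtXxx argument being removed, the "+" lines restore the pre-8331540 call sites, and NMT typing reverts to explicit MemTracker::record_virtual_memory_type() calls.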
@@ -69,7 +69,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
"Invalid immediate %d " UINT64_FORMAT, index, immediate);
-result = os::attempt_reserve_memory_at((char*)immediate, size, !ExecMem, mtClass);
+result = os::attempt_reserve_memory_at((char*)immediate, size, false);
if (result == nullptr) {
log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
}
@@ -112,7 +112,7 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size
if (result == nullptr) {
constexpr size_t alignment = nth_bit(32);
log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
-result = os::reserve_memory_aligned(size, alignment, !ExecMem, mtClass);
+result = os::reserve_memory_aligned(size, alignment, false);
}

return result;
@@ -1805,7 +1805,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

-void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
@@ -1847,7 +1847,7 @@ bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, siz
}

// Reserves and attaches a shared memory segment.
-char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
// Always round to os::vm_page_size(), which may be larger than 4K.
bytes = align_up(bytes, os::vm_page_size());

@@ -1996,7 +1996,7 @@ void os::large_page_init() {
return; // Nothing to do. See query_multipage_support and friends.
}

-char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
+char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
fatal("os::reserve_memory_special should not be called on AIX.");
return nullptr;
}
@@ -2015,7 +2015,7 @@ bool os::can_commit_large_page_memory() {
return false;
}

-char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
+char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = nullptr;

@@ -2033,7 +2033,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
char* addr = nullptr;

// Always round to os::vm_page_size(), which may be larger than 4K.
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
_initialized(false) {

// Reserve address space for backing memory
-_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
+_base = (uintptr_t)os::reserve_memory(max_capacity);
if (_base == 0) {
// Failed
log_error_pd(gc)("Failed to reserve address space for backing memory");
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
_initialized(false) {

// Reserve address space for backing memory
-_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
+_base = (uintptr_t)os::reserve_memory(max_capacity);
if (_base == 0) {
// Failed
log_error_pd(gc)("Failed to reserve address space for backing memory");
@@ -1668,7 +1668,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

-void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
::madvise(addr, bytes, MADV_DONTNEED);
}

@@ -1766,13 +1766,13 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
-return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::uncommit_memory(addr, size);
}

// 'requested_addr' is only treated as a hint, the return value may or
@@ -1809,7 +1809,7 @@ static int anon_munmap(char * addr, size_t size) {
}
}

-char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
return anon_mmap(nullptr /* addr */, bytes, exec);
}

@@ -1869,7 +1869,7 @@ void os::large_page_init() {
}


-char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
+char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
fatal("os::reserve_memory_special should not be called on BSD.");
return nullptr;
}
@@ -1888,9 +1888,9 @@ bool os::can_commit_large_page_memory() {
return false;
}

-char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
+char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
-char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
+char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
if (result != nullptr) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -1902,7 +1902,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
// Assert only that the size is a multiple of the page size, since
// that's all that mmap requires, and since that's all we really know
// about at this low abstraction level. If we need higher alignment,
@@ -3023,14 +3023,14 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
}

-void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
// This method works by doing an mmap over an existing mmaping and effectively discarding
// the existing pages. However it won't work for SHM-based large pages that cannot be
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
-commit_memory(addr, bytes, alignment_hint, !ExecMem, flag);
+commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}

@@ -3637,7 +3637,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
}
}

-return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
@@ -3653,7 +3653,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return ::munmap(addr, size) == 0;
}

-return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::uncommit_memory(addr, size);
}

// 'requested_addr' is only treated as a hint, the return value may or
@@ -3757,7 +3757,7 @@ static int anon_munmap(char * addr, size_t size) {
return 1;
}

-char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
return anon_mmap(nullptr, bytes);
}

@@ -4214,7 +4214,7 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes,
}

char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
-char* req_addr, bool exec, MEMFLAGS flag) {
+char* req_addr, bool exec) {
assert(UseLargePages, "only for large pages");

char* const addr = reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
@@ -4249,9 +4249,9 @@ bool os::can_commit_large_page_memory() {
return UseTransparentHugePages;
}

-char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
+char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
-char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
+char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
if (result != nullptr) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -4263,7 +4263,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
// Assert only that the size is a multiple of the page size, since
// that's all that mmap requires, and since that's all we really know
// about at this low abstraction level. If we need higher alignment,
@@ -4655,7 +4655,7 @@ static void workaround_expand_exec_shield_cs_limit() {
*/
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
(StackOverflow::stack_guard_zone_size() + page_size));
-char* codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);
+char* codebuf = os::attempt_reserve_memory_at(hint, page_size);

if (codebuf == nullptr) {
// JDK-8197429: There may be a stack gap of one megabyte between
@@ -4663,13 +4663,15 @@ static void workaround_expand_exec_shield_cs_limit() {
// Linux kernel workaround for CVE-2017-1000364. If we failed to
// map our codebuf, try again at an address one megabyte lower.
hint -= 1 * M;
-codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);
+codebuf = os::attempt_reserve_memory_at(hint, page_size);
}

-if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, ExecMem, mtInternal))) {
+if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
return; // No matter, we tried, best effort.
}

+MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
+
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);

// Some code to exec: the 'ret' instruction
@@ -395,9 +395,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
-char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec, MEMFLAGS flag) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
size_t extra_size = calculate_aligned_extra_size(size, alignment);
-char* extra_base = os::reserve_memory(extra_size, exec, flag);
+char* extra_base = os::reserve_memory(extra_size, exec);
if (extra_base == nullptr) {
return nullptr;
}
@@ -421,7 +421,7 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_des
if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
}
-MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC, flag);
+MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
return aligned_base;
}

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -65,14 +65,14 @@ static char* backing_store_file_name = nullptr; // name of the backing store
static char* create_standard_memory(size_t size) {

// allocate an aligned chuck of memory
-char* mapAddress = os::reserve_memory(size, !ExecMem, mtInternal);
+char* mapAddress = os::reserve_memory(size);

if (mapAddress == nullptr) {
return nullptr;
}

// commit memory
-if (!os::commit_memory(mapAddress, size, !ExecMem, mtInternal)) {
+if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@@ -2755,7 +2755,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
-!ExecMem, mtThreadStack);
+!ExecMem);
return EXCEPTION_CONTINUE_EXECUTION;
}
#endif
@@ -3117,9 +3117,8 @@ static bool numa_interleaving_init() {
// Reasons for doing this:
// * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
// * UseNUMAInterleaving requires a separate node for each piece
-static char* allocate_pages_individually(size_t bytes, char* addr, DWORD alloc_type,
+static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
DWORD prot,
-MEMFLAGS flag,
bool should_inject_error = false) {
char * p_buf;
// note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
@@ -3143,7 +3142,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD alloc_t
PAGE_READWRITE);
// If reservation failed, return null
if (p_buf == nullptr) return nullptr;
-MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC, flag);
+MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);

// we still need to round up to a page boundary (in case we are using large pages)
@@ -3185,13 +3184,13 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD alloc_t
if (!UseNUMAInterleaving) {
p_new = (char *) virtualAlloc(next_alloc_addr,
bytes_to_rq,
-alloc_type,
+flags,
prot);
} else {
// get the next node to use from the used_node_list
assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
-p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, alloc_type, prot, node);
+p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
}
}

@@ -3204,7 +3203,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD alloc_t
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
-bytes_to_release, CALLER_PC, flag);
+bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@@ -3221,10 +3220,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD alloc_t
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
-if ((alloc_type & MEM_COMMIT) != 0) {
-MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC, flag);
+if ((flags & MEM_COMMIT) != 0) {
+MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
-MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC, flag);
+MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}

// made it this far, success
@@ -3352,7 +3351,7 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
-static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag) {
+static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag = mtNone) {
assert(is_aligned(alignment, os::vm_allocation_granularity()),
"Alignment must be a multiple of allocation granularity (page size)");
assert(is_aligned(size, os::vm_allocation_granularity()),
@@ -3366,7 +3365,7 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi

for (int attempt = 0; attempt < max_attempts && aligned_base == nullptr; attempt ++) {
char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, flag) :
-os::reserve_memory(extra_size, !ExecMem, flag);
+os::reserve_memory(extra_size, false, flag);
if (extra_base == nullptr) {
return nullptr;
}
@@ -3383,7 +3382,7 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
// Which may fail, hence the loop.
aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, flag) :
-os::attempt_reserve_memory_at(aligned_base, size, !ExecMem, flag);
+os::attempt_reserve_memory_at(aligned_base, size, false, flag);
}

assert(aligned_base != nullptr, "Did not manage to re-map after %d attempts?", max_attempts);
@@ -3391,22 +3390,22 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
return aligned_base;
}

-char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec, MEMFLAGS flag) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
// exec can be ignored
-return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */, flag);
+return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
}

char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag) {
return map_or_reserve_memory_aligned(size, alignment, fd, flag);
}

-char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
-return pd_attempt_reserve_memory_at(nullptr /* addr */, bytes, exec, flag);
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
+return pd_attempt_reserve_memory_at(nullptr /* addr */, bytes, exec);
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
-char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec, MEMFLAGS flag) {
+char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_page_size() == 0, "reserve page size");
@@ -3421,7 +3420,7 @@ char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec, MEMF
if (Verbose && PrintMiscellaneous) reserveTimer.start();
// in numa interleaving, we have to allocate pages individually
// (well really chunks of NUMAInterleaveGranularity size)
-res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE, flag);
+res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
if (res == nullptr) {
warning("NUMA page allocation failed");
}
@@ -3442,7 +3441,7 @@ size_t os::vm_min_address() {
return _vm_min_address_default;
}

-char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
+char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
return map_memory_to_file(requested_addr, bytes, file_desc);
}
@@ -3458,13 +3457,13 @@ bool os::can_commit_large_page_memory() {
return false;
}

-static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec, MEMFLAGS flag) {
+static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec) {
log_debug(pagesize)("Reserving large pages individually.");

const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-const DWORD alloc_type = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
+const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

-char * p_buf = allocate_pages_individually(size, req_addr, alloc_type, prot, flag, LargePagesIndividualAllocationInjectError);
+char * p_buf = allocate_pages_individually(size, req_addr, flags, prot, LargePagesIndividualAllocationInjectError);
if (p_buf == nullptr) {
// give an appropriate warning message
if (UseNUMAInterleaving) {
@@ -3488,12 +3487,12 @@ static char* reserve_large_pages_single_range(size_t size, char* req_addr, bool
return (char *) virtualAlloc(req_addr, size, flags, prot);
}

-static char* reserve_large_pages(size_t size, char* req_addr, bool exec, MEMFLAGS flag) {
+static char* reserve_large_pages(size_t size, char* req_addr, bool exec) {
// with large pages, there are two cases where we need to use Individual Allocation
// 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
// 2) NUMA Interleaving is enabled, in which case we use a different node for each page
if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
-return reserve_large_pages_individually(size, req_addr, exec, flag);
+return reserve_large_pages_individually(size, req_addr, exec);
}
return reserve_large_pages_single_range(size, req_addr, exec);
}
@@ -3510,7 +3509,7 @@ static char* find_aligned_address(size_t size, size_t alignment) {
return aligned_addr;
}

-static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec, MEMFLAGS flag) {
+static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec) {
log_debug(pagesize)("Reserving large pages at an aligned address, alignment=" SIZE_FORMAT "%s",
byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

@@ -3523,7 +3522,7 @@ static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exe
char* aligned_address = find_aligned_address(size, alignment);

// Try to do the large page reservation using the aligned address.
-aligned_address = reserve_large_pages(size, aligned_address, exec, flag);
+aligned_address = reserve_large_pages(size, aligned_address, exec);
if (aligned_address != nullptr) {
// Reservation at the aligned address succeeded.
guarantee(is_aligned(aligned_address, alignment), "Must be aligned");
@@ -3536,7 +3535,7 @@ static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exe
}

char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* addr,
-bool exec, MEMFLAGS flag) {
+bool exec) {
assert(UseLargePages, "only for large pages");
assert(page_size == os::large_page_size(), "Currently only support one large page size on Windows");
assert(is_aligned(addr, alignment), "Must be");
@@ -3552,11 +3551,11 @@ char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_
// ensure that the requested alignment is met. When there is a requested address
// this solves it self, since it must be properly aligned already.
if (addr == nullptr && alignment > page_size) {
-return reserve_large_pages_aligned(bytes, alignment, exec, flag);
+return reserve_large_pages_aligned(bytes, alignment, exec);
}

// No additional requirements, just reserve the large pages.
-return reserve_large_pages(bytes, addr, exec, flag);
+return reserve_large_pages(bytes, addr, exec);
}

bool os::pd_release_memory_special(char* base, size_t bytes) {
@@ -3722,11 +3721,11 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
-return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
+return os::uncommit_memory(addr, size);
}

static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
@@ -3777,7 +3776,7 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
// memory, not a big deal anyway, as bytes less or equal than 64K
if (!is_committed) {
commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
-mtInternal, "cannot commit protection page");
+"cannot commit protection page");
}
// One cannot use os::guard_memory() here, as on Win32 guard page
// have different (one-shot) semantics, from MSDN on PAGE_GUARD:
@@ -3817,7 +3816,7 @@ bool os::unguard_memory(char* addr, size_t bytes) {
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
-void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) { }
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }

size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;
@@ -5105,6 +5104,9 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
return nullptr;
}

+// Record virtual memory allocation
+MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
+
DWORD bytes_read;
OVERLAPPED overlapped;
overlapped.Offset = (DWORD)file_offset;
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,14 +55,14 @@ typedef BOOL (WINAPI *SetSecurityDescriptorControlFnPtr)(
static char* create_standard_memory(size_t size) {

// allocate an aligned chuck of memory
-char* mapAddress = os::reserve_memory(size, !ExecMem, mtInternal);
+char* mapAddress = os::reserve_memory(size);

if (mapAddress == nullptr) {
return nullptr;
}

// commit memory
-if (!os::commit_memory(mapAddress, size, !ExecMem, mtInternal)) {
+if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@@ -326,7 +326,7 @@ size_t ArchiveBuilder::estimate_archive_size() {

address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = estimate_archive_size();
-ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size(), mtClassShared);
+ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
MetaspaceShared::unrecoverable_writing_error();
@@ -1710,9 +1710,9 @@ void FileMapInfo::close() {
/*
* Same as os::map_memory() but also pretouches if AlwaysPreTouch is enabled.
*/
-static char* map_and_pretouch_memory(int fd, const char* file_name, size_t file_offset,
-char *addr, size_t bytes, bool read_only,
-bool allow_exec, MEMFLAGS flags) {
+static char* map_memory(int fd, const char* file_name, size_t file_offset,
+char *addr, size_t bytes, bool read_only,
+bool allow_exec, MEMFLAGS flags = mtNone) {
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
AlwaysPreTouch ? false : read_only,
allow_exec, flags);
@@ -1741,7 +1741,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Replace old mapping with new one that is writable.
char *base = os::map_memory(_fd, _full_path, r->file_offset(),
addr, size, false /* !read_only */,
-r->allow_exec(), mtClassShared);
+r->allow_exec());
close();
// These have to be errors because the shared region is now unmapped.
if (base == nullptr) {
@@ -1800,7 +1800,7 @@ bool FileMapInfo::read_region(int i, char* base, size_t size, bool do_commit) {
log_info(cds)("Commit %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)%s",
is_static() ? "static " : "dynamic", i, p2i(base), p2i(base + size),
shared_region_name[i], r->allow_exec() ? " exec" : "");
-if (!os::commit_memory(base, size, r->allow_exec(), mtClassShared)) {
+if (!os::commit_memory(base, size, r->allow_exec())) {
log_error(cds)("Failed to commit %s region #%d (%s)", is_static() ? "static " : "dynamic",
i, shared_region_name[i]);
return false;
@@ -1860,9 +1860,9 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
// Note that this may either be a "fresh" mapping into unreserved address
// space (Windows, first mapping attempt), or a mapping into pre-reserved
// space (Posix). See also comment in MetaspaceShared::map_archives().
-char* base = map_and_pretouch_memory(_fd, _full_path, r->file_offset(),
-requested_addr, size, r->read_only(),
-r->allow_exec(), mtClassShared);
+char* base = map_memory(_fd, _full_path, r->file_offset(),
+requested_addr, size, r->read_only(),
+r->allow_exec(), mtClassShared);
if (base != requested_addr) {
log_info(cds)("Unable to map %s shared space at " INTPTR_FORMAT,
shared_region_name[i], p2i(requested_addr));
@@ -1889,8 +1889,8 @@ char* FileMapInfo::map_bitmap_region() {
}
bool read_only = true, allow_exec = false;
char* requested_addr = nullptr; // allow OS to pick any location
-char* bitmap_base = map_and_pretouch_memory(_fd, _full_path, r->file_offset(),
-requested_addr, r->used_aligned(), read_only, allow_exec, mtClassShared);
+char* bitmap_base = map_memory(_fd, _full_path, r->file_offset(),
+requested_addr, r->used_aligned(), read_only, allow_exec, mtClassShared);
if (bitmap_base == nullptr) {
log_info(cds)("failed to map relocation bitmap");
return nullptr;
@@ -2176,11 +2176,10 @@ bool FileMapInfo::map_heap_region_impl() {

// Map the archived heap data. No need to call MemTracker::record_virtual_memory_type()
// for mapped region as it is part of the reserved java heap, which is already recorded.
-// So we pass the mtJavaHeap to tell MemTracker the type of the already tracked memory.
char* addr = (char*)_mapped_heap_memregion.start();
-char* base = map_and_pretouch_memory(_fd, _full_path, r->file_offset(),
-addr, _mapped_heap_memregion.byte_size(), r->read_only(),
-r->allow_exec(), mtJavaHeap);
+char* base = map_memory(_fd, _full_path, r->file_offset(),
+addr, _mapped_heap_memregion.byte_size(), r->read_only(),
+r->allow_exec());
if (base == nullptr || base != addr) {
dealloc_heap_region();
log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. "
@@ -269,7 +269,7 @@ void MetaspaceShared::initialize_for_static_dump() {
SharedBaseAddress = (size_t)_requested_base_address;

size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
-_symbol_rs = ReservedSpace(symbol_rs_size, mtClassShared);
+_symbol_rs = ReservedSpace(symbol_rs_size);
if (!_symbol_rs.is_reserved()) {
log_error(cds)("Unable to reserve memory for symbols: " SIZE_FORMAT " bytes.", symbol_rs_size);
MetaspaceShared::unrecoverable_writing_error();
@@ -1270,10 +1270,12 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
-os::vm_page_size(), mtClassShared, (char*)base_address);
+os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
+// Register archive space with NMT.
+MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
return archive_space_rs.base();
}
return nullptr;
@@ -1317,18 +1319,21 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// via sequential file IO.
address ccs_base = base_address + archive_space_size + gap_size;
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
-os::vm_page_size(), mtClassShared, (char*)base_address);
+os::vm_page_size(), (char*)base_address);
class_space_rs = ReservedSpace(class_space_size, class_space_alignment,
-os::vm_page_size(), mtClass, (char*)ccs_base);
+os::vm_page_size(), (char*)ccs_base);
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
return nullptr;
}
+// NMT: fix up the space tags
+MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
+MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, archive_space_alignment,
-os::vm_page_size(), mtClassShared, (char*) base_address);
+os::vm_page_size(), (char*) base_address);
} else {
// We did not manage to reserve at the preferred address, or were instructed to relocate. In that
// case we reserve wherever possible, but the start address needs to be encodable as narrow Klass
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -240,7 +240,7 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
if (_fd < 0) {
quit("Unable to open hashtable dump file", filename);
}
-_base = os::map_memory(_fd, filename, 0, nullptr, _size, true, false, mtClassShared);
+_base = os::map_memory(_fd, filename, 0, nullptr, _size, true, false);
if (_base == nullptr) {
quit("Unable to map hashtable dump file", filename);
}
@@ -1213,7 +1213,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
size_t translation_factor) {
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
// Allocate a new reserved space, preferring to use large pages.
-ReservedSpace rs(size, preferred_page_size, mtGC);
+ReservedSpace rs(size, preferred_page_size);
size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,14 +135,14 @@ void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pa
char* start_addr = page_start(start);
size_t size = num_pages * _page_size;

-os::commit_memory_or_exit(start_addr, size, _page_size, !ExecMem, mtGC, "G1 virtual space");
+os::commit_memory_or_exit(start_addr, size, _page_size, false, "G1 virtual space");
}

void G1PageBasedVirtualSpace::commit_tail() {
vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");

char* const aligned_end_address = align_down(_high_boundary, _page_size);
-os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), !ExecMem, mtGC, "G1 virtual space");
+os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), false, "G1 virtual space");
}

void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
@@ -201,7 +201,7 @@ void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_pa
"Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);

char* start_addr = page_start(start_page);
-os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)), !ExecMem, mtGC);
+os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
}

void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,10 +43,12 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
MEMFLAGS type) :
_listener(nullptr),
_storage(rs, used_size, page_size),
-_region_commit_map(rs.size() * commit_factor / region_granularity, type),
+_region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
_memory_type(type) {
guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
+
+MemTracker::record_virtual_memory_type((address)rs.base(), type);
}

// Used to manually signal a mapper to handle a set of regions as committed.
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@ void MutableNUMASpace::bias_region(MemRegion mr, uint lgrp_id) {
// Then we uncommit the pages in the range.
// The alignment_hint argument must be less than or equal to the small page
// size if not using large pages or else this function does nothing.
-os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align, mtGC);
+os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align);
// And make them local/first-touch biased.
os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), checked_cast<int>(lgrp_id));
}
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_s
size_t size = pointer_delta(end, start, sizeof(char));
if (clear_space) {
// Prefer page reallocation to migration.
-os::free_memory((char*)start, size, page_size, mtJavaHeap);
+os::free_memory((char*)start, size, page_size);
}
os::numa_make_global((char*)start, size);
}
@@ -47,10 +47,11 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {

// Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper.
-ReservedSpace backing_store(bytes_to_reserve, mtGC);
+ReservedSpace backing_store(bytes_to_reserve);
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}
+MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);

// We do not commit any memory initially
_virtual_space.initialize(backing_store);
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,13 @@ ParMarkBitMap::initialize(MemRegion covered_region)

const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
-ReservedSpace rs(_reserved_byte_size, rs_align, page_sz, mtGC);
+ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
const size_t used_page_sz = rs.page_size();
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes,
rs.base(), rs.size(), used_page_sz);
+
+MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

_virtual_space = new PSVirtualSpace(rs, page_sz);
if (_virtual_space != nullptr && _virtual_space->expand_by(_reserved_byte_size)) {
_region_start = covered_region.start();
@@ -440,10 +440,12 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)

const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
-ReservedSpace rs(_reserved_byte_size, rs_align, page_sz, mtGC);
+ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
rs.size(), page_sz);
+
+MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
if (vspace != 0) {
if (vspace->expand_by(_reserved_byte_size)) {
@@ -78,7 +78,7 @@ bool PSVirtualSpace::expand_by(size_t bytes) {

char* const base_addr = committed_high_addr();
bool result = special() ||
-os::commit_memory(base_addr, bytes, alignment(), !ExecMem, mtGC);
+os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_high_addr += bytes;
}
@@ -95,7 +95,7 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
}

char* const base_addr = committed_high_addr() - bytes;
-bool result = special() || os::uncommit_memory(base_addr, bytes, !ExecMem, mtGC);
+bool result = special() || os::uncommit_memory(base_addr, bytes);
if (result) {
_committed_high_addr -= bytes;
}
@@ -37,11 +37,13 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
size_t init_word_size):
_reserved(reserved) {
size_t size = compute_size(reserved.word_size());
-ReservedSpace rs(size, mtGC);
+ReservedSpace rs(size);
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
+
+MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
@@ -82,7 +82,9 @@ void CardTable::initialize(void* region0_start, void* region1_start) {

const size_t rs_align = _page_size == os::vm_page_size() ? 0 :
MAX2(_page_size, os::vm_allocation_granularity());
-ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size, mtGC);
+ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
+
+MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

os::trace_page_sizes("Card Table", num_bytes, num_bytes,
heap_rs.base(), heap_rs.size(), _page_size);
@@ -164,7 +166,6 @@ void CardTable::resize_covered_region(MemRegion new_region) {
delta.byte_size(),
_page_size,
!ExecMem,
-mtGCCardSet,
"card table expansion");

memset(delta.start(), clean_card, delta.byte_size());
@@ -173,9 +174,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
MemRegion delta = MemRegion(new_committed.end(),
old_committed.word_size() - new_committed.word_size());
bool res = os::uncommit_memory((char*)delta.start(),
-delta.byte_size(),
-!ExecMem,
-mtGCCardSet);
+delta.byte_size());
assert(res, "uncommit should succeed");
}

@@ -57,19 +57,21 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
// subsystem for mapping not-yet-written-to pages to a single physical backing page,
// but this is not guaranteed, and would confuse NMT and other memory accounting tools.

+MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);
+
size_t page_size = os::vm_page_size();

if (!_map_space.special()) {
// Commit entire pages that cover the heap cset map.
char* bot_addr = align_down(_cset_map, page_size);
char* top_addr = align_up(_cset_map + _map_size, page_size);
-os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), !ExecMem,
-mtGC, "Unable to commit collection set bitmap: heap");
+os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
+"Unable to commit collection set bitmap: heap");

// Commit the zero page, if not yet covered by heap cset map.
if (bot_addr != _biased_cset_map) {
-os::commit_memory_or_exit(_biased_cset_map, page_size, !ExecMem,
-mtGC, "Unable to commit collection set bitmap: zero page");
+os::commit_memory_or_exit(_biased_cset_map, page_size, false,
+"Unable to commit collection set bitmap: zero page");
}
}

@@ -213,8 +213,8 @@ jint ShenandoahHeap::initialize() {

ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
if (!_heap_region_special) {
-os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, !ExecMem,
-mtGC, "Cannot commit heap memory");
+os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
+"Cannot commit heap memory");
}

//
@@ -247,11 +247,12 @@ jint ShenandoahHeap::initialize() {
"Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
_bitmap_bytes_per_slice, bitmap_page_size);

-ReservedSpace bitmap(_bitmap_size, bitmap_page_size, mtGC);
+ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
os::trace_page_sizes_for_requested_size("Mark Bitmap",
bitmap_size_orig, bitmap_page_size,
bitmap.base(),
bitmap.size(), bitmap.page_size());
+MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
_bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
_bitmap_region_special = bitmap.special();

@@ -259,22 +260,23 @@ jint ShenandoahHeap::initialize() {
align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
if (!_bitmap_region_special) {
-os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, !ExecMem,
-mtGC, "Cannot commit bitmap memory");
+os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
+"Cannot commit bitmap memory");
}

_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

if (ShenandoahVerify) {
-ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size, mtGC);
+ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
os::trace_page_sizes_for_requested_size("Verify Bitmap",
bitmap_size_orig, bitmap_page_size,
verify_bitmap.base(),
verify_bitmap.size(), verify_bitmap.page_size());
if (!verify_bitmap.special()) {
-os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, !ExecMem,
-mtGC, "Cannot commit verification bitmap memory");
+os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
+"Cannot commit verification bitmap memory");
}
+MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
_verification_bit_map.initialize(_heap_region, verify_bitmap_region);
_verifier = new ShenandoahVerifier(this, &_verification_bit_map);
@@ -290,11 +292,12 @@ jint ShenandoahHeap::initialize() {
aux_bitmap_page_size = os::vm_page_size();
}
#endif
-ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size, mtGC);
+ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
os::trace_page_sizes_for_requested_size("Aux Bitmap",
bitmap_size_orig, aux_bitmap_page_size,
aux_bitmap.base(),
aux_bitmap.size(), aux_bitmap.page_size());
+MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
_aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
_aux_bitmap_region_special = aux_bitmap.special();
_aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
@@ -307,14 +310,15 @@ jint ShenandoahHeap::initialize() {
size_t region_storage_size = align_up(region_storage_size_orig,
MAX2(region_page_size, os::vm_allocation_granularity()));

-ReservedSpace region_storage(region_storage_size, region_page_size, mtGC);
+ReservedSpace region_storage(region_storage_size, region_page_size);
os::trace_page_sizes_for_requested_size("Region Storage",
region_storage_size_orig, region_page_size,
region_storage.base(),
region_storage.size(), region_storage.page_size());
+MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
if (!region_storage.special()) {
-os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, !ExecMem,
-mtGC, "Cannot commit region memory");
+os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
+"Cannot commit region memory");
}

// Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
@@ -332,7 +336,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
-cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, mtGC, req_addr);
+cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -341,7 +345,7 @@ jint ShenandoahHeap::initialize() {
}

if (_collection_set == nullptr) {
-cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size(), mtGC);
+cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
os::trace_page_sizes_for_requested_size("Collection Set",
@@ -1410,7 +1414,7 @@ void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

-if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), !ExecMem, mtGC)) {
+if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
return false;
}
@@ -1430,7 +1434,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
-if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), !ExecMem, mtGC)) {
+if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
}
}
@@ -2260,7 +2264,7 @@ bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
size_t len = _bitmap_bytes_per_slice;
char* start = (char*) _bitmap_region.start() + off;

-if (!os::commit_memory(start, len, !ExecMem, mtGC)) {
+if (!os::commit_memory(start, len, false)) {
return false;
}

@@ -2289,7 +2293,7 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
size_t slice = r->index() / _bitmap_regions_per_slice;
size_t off = _bitmap_bytes_per_slice * slice;
size_t len = _bitmap_bytes_per_slice;
-if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len, !ExecMem, mtGC)) {
+if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
return false;
}
return true;
@ -620,7 +620,7 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
|
||||
|
||||
void ShenandoahHeapRegion::do_commit() {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, !ExecMem, mtJavaHeap)) {
|
||||
if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
|
||||
report_java_out_of_memory("Unable to commit region");
|
||||
}
|
||||
if (!heap->commit_bitmap_slice(this)) {
|
||||
@ -634,7 +634,7 @@ void ShenandoahHeapRegion::do_commit() {
|
||||
|
||||
void ShenandoahHeapRegion::do_uncommit() {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes, !ExecMem, mtJavaHeap)) {
|
||||
if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
|
||||
report_java_out_of_memory("Unable to uncommit region");
|
||||
}
|
||||
if (!heap->uncommit_bitmap_slice(this)) {
|
||||
|
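Every hunk in this file pair follows the same restored two-step shape: the reservation or commit call drops its MEMFLAGS argument, and the NMT tag is applied afterwards via MemTracker::record_virtual_memory_type. A minimal illustrative sketch of that pattern (hypothetical helper, not part of the patch; assumes the HotSpot-internal headers named below):

// Sketch only: reserve untyped, tag for NMT, then commit eagerly if needed.
#include "memory/virtualspace.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

static char* reserve_and_tag(size_t size, size_t page_size) {
  ReservedSpace rs(size, page_size);                       // no MEMFLAGS parameter after the backout
  if (!rs.is_reserved()) {
    return nullptr;                                        // reservation failed
  }
  MemTracker::record_virtual_memory_type(rs.base(), mtGC); // NMT type assigned separately
  if (!rs.special()) {                                     // "special" mappings are pre-committed
    os::commit_memory_or_exit(rs.base(), rs.size(), page_size, false,
                              "Cannot commit memory");
  }
  return rs.base();
}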
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,7 +88,7 @@ size_t XMarkStackSpace::expand_space() {
old_size / M, new_size / M);

// Expand
os::commit_memory_or_exit((char*)_end, expand_size, !ExecMem, mtGC, "Mark stack space");
os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space");

return expand_size;
}
@ -105,7 +105,7 @@ size_t XMarkStackSpace::shrink_space() {
old_size / M, new_size / M);

const uintptr_t shrink_start = _end - shrink_size;
os::uncommit_memory((char*)shrink_start, shrink_size, !ExecMem, mtGC);
os::uncommit_memory((char*)shrink_start, shrink_size, false /* executable */);
}

return shrink_size;
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -279,13 +279,13 @@ void XPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
void XPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
// From an NMT point of view we treat the first heap view (marked0) as committed
const uintptr_t addr = XAddress::marked0(offset);
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC, mtGC);
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}

void XPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
const uintptr_t addr = XAddress::marked0(offset);
ThreadCritical tc;
MemTracker::record_virtual_memory_uncommit((address)addr, size, mtGC);
MemTracker::record_virtual_memory_uncommit((address)addr, size);
}

void XPhysicalMemoryManager::alloc(XPhysicalMemory& pmem, size_t size) {
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,7 +181,8 @@ bool XVirtualMemoryManager::reserve(size_t max_capacity) {
}

void XVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC, mtJavaHeap);
MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC);
MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap);
}

bool XVirtualMemoryManager::is_initialized() const {
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -87,7 +87,7 @@ size_t ZMarkStackSpace::expand_space() {
old_size / M, new_size / M);

// Expand
os::commit_memory_or_exit((char*)_end, expand_size, !ExecMem, mtGC, "Mark stack space");
os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space");

return expand_size;
}
@ -104,7 +104,7 @@ size_t ZMarkStackSpace::shrink_space() {
old_size / M, new_size / M);

const uintptr_t shrink_start = _end - shrink_size;
os::uncommit_memory((char*)shrink_start, shrink_size, !ExecMem, mtGC);
os::uncommit_memory((char*)shrink_start, shrink_size, false /* executable */);
}

return shrink_size;
|
@ -71,10 +71,10 @@ void ZNMT::process_fake_mapping(zoffset offset, size_t size, bool commit) {

// commit / uncommit memory
if (commit) {
MemTracker::record_virtual_memory_commit((void*)sub_range_addr, sub_range_size, CALLER_PC, mtGC);
MemTracker::record_virtual_memory_commit((void*)sub_range_addr, sub_range_size, CALLER_PC);
} else {
ThreadCritical tc;
MemTracker::record_virtual_memory_uncommit((address)sub_range_addr, sub_range_size, mtGC);
MemTracker::record_virtual_memory_uncommit((address)sub_range_addr, sub_range_size);
}

left_to_process -= sub_range_size;
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -104,8 +104,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = ReservedSpace(reservation_size_request_bytes,
os::vm_allocation_granularity(),
os::vm_page_size(),
mtTracing);
os::vm_page_size());
if (!_rs.is_reserved()) {
return false;
}
@ -118,6 +117,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
_rs.base(),
_rs.size(),
os::vm_page_size());
MemTracker::record_virtual_memory_type((address)_rs.base(), mtTracing);
assert(is_aligned(_rs.base(), os::vm_page_size()), "invariant");
assert(is_aligned(_rs.size(), os::vm_page_size()), "invariant");

|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
return nullptr;
}

if (os::commit_memory(addr, size, !ExecMem, flags)) {
if (os::commit_memory(addr, size, !ExecMem)) {
return (E*)addr;
} else {
os::release_memory(addr, size);
@ -80,7 +80,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}

os::commit_memory_or_exit(addr, size, !ExecMem, flags, "Allocator (commit)");
os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");

return (E*)addr;
}
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,11 +227,13 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

// reserve space for _segmap
ReservedSpace seg_rs(reserved_segments_size, mtCode);
ReservedSpace seg_rs(reserved_segments_size);
if (!_segmap.initialize(seg_rs, committed_segments_size)) {
return false;
}

MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");
|
@ -589,7 +589,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
if (result == nullptr) {
// Fallback: reserve anywhere
log_debug(metaspace, map)("Trying anywhere...");
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), !ExecMem, mtClass);
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
}

// Wrap resulting range in ReservedSpace
@ -598,7 +598,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
log_debug(metaspace, map)("Mapped at " PTR_FORMAT, p2i(result));
assert(is_aligned(result, Metaspace::reserve_alignment()), "Alignment too small for metaspace");
rs = ReservedSpace::space_for_range(result, size, Metaspace::reserve_alignment(),
os::vm_page_size(), false, false, mtClass);
os::vm_page_size(), false, false);
} else {
log_debug(metaspace, map)("Failed to map.");
rs = ReservedSpace();
@ -739,7 +739,7 @@ void Metaspace::global_initialize() {
CompressedClassSpaceBaseAddress, Metaspace::reserve_alignment()));
}
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
os::vm_page_size() /* large */, mtClass, (char*)base);
os::vm_page_size() /* large */, (char*)base);
if (rs.is_reserved()) {
log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
} else {
@ -767,6 +767,9 @@ void Metaspace::global_initialize() {
CompressedClassSpaceSize));
}

// Mark class space as such
MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);

// Initialize space
Metaspace::initialize_class_space(rs);

|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
reserve_limit, Metaspace::reserve_alignment_words());
if (reserve_limit > 0) {
// have reserve limit -> non-expandable context
_rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size(), mtMetaspace);
_rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
} else {
// no reserve limit -> expandable vslist
|
@ -109,7 +109,7 @@ bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
}

// Commit...
if (os::commit_memory((char*)p, word_size * BytesPerWord, !ExecMem, _rs.nmt_flag()) == false) {
if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
}

@ -188,7 +188,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
}

// Uncommit...
if (os::uncommit_memory((char*)p, word_size * BytesPerWord, !ExecMem, _rs.nmt_flag()) == false) {
if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
// Note: this can actually happen, since uncommit may increase the number of mappings.
fatal("Failed to uncommit metaspace.");
}
@ -255,10 +255,11 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
ReservedSpace rs(word_size * BytesPerWord,
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
os::vm_page_size(), mtMetaspace);
os::vm_page_size());
if (!rs.is_reserved()) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
}
MemTracker::record_virtual_memory_type(rs.base(), mtMetaspace);
assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
InternalStats::inc_num_vsnodes_births();
return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);
|
@ -42,19 +42,19 @@

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
_alignment(0), _fd_for_heap(-1), _special(false), _executable(false), _flag(mtNone) {
_alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size, MEMFLAGS flag) : _fd_for_heap(-1), _flag(flag) {
ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
// Want to use large pages where possible. If the size is
// not large page aligned the mapping will be a mix of
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();
initialize(size, alignment, page_size, nullptr, false, flag);
initialize(size, alignment, page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size, MEMFLAGS flag) : _fd_for_heap(-1), _flag(flag) {
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
// When a page size is given we don't want to mix large
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
@ -63,46 +63,45 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size, MEMFLAGS f
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}
initialize(size, alignment, preferred_page_size, nullptr, false, flag);
initialize(size, alignment, preferred_page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size,
size_t alignment,
size_t page_size,
MEMFLAGS flag,
char* requested_address) : _fd_for_heap(-1), _flag(flag) {
initialize(size, alignment, page_size, requested_address, false, flag);
char* requested_address) : _fd_for_heap(-1) {
initialize(size, alignment, page_size, requested_address, false);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
bool special, bool executable, MEMFLAGS flag) : _fd_for_heap(-1), _flag(flag) {
bool special, bool executable) : _fd_for_heap(-1) {
assert((size % os::vm_allocation_granularity()) == 0,
"size not allocation aligned");
initialize_members(base, size, alignment, page_size, special, executable, flag);
initialize_members(base, size, alignment, page_size, special, executable);
}

// Helper method
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable, MEMFLAGS flag) {
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
if (fd != -1) {
return os::attempt_map_memory_to_file_at(base, size, fd, flag);
return os::attempt_map_memory_to_file_at(base, size, fd);
}
return os::attempt_reserve_memory_at(base, size, executable, flag);
return os::attempt_reserve_memory_at(base, size, executable);
}

// Helper method
static char* map_or_reserve_memory(size_t size, int fd, bool executable, MEMFLAGS flag) {
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
if (fd != -1) {
return os::map_memory_to_file(size, fd, flag);
return os::map_memory_to_file(size, fd);
}
return os::reserve_memory(size, executable, flag);
return os::reserve_memory(size, executable);
}

// Helper method
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable, MEMFLAGS flag) {
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
if (fd != -1) {
return os::map_memory_to_file_aligned(size, alignment, fd, flag);
return os::map_memory_to_file_aligned(size, alignment, fd);
}
return os::reserve_memory_aligned(size, alignment, executable, flag);
return os::reserve_memory_aligned(size, alignment, executable);
}

// Helper method
@ -155,7 +154,7 @@ static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
}

static char* reserve_memory(char* requested_address, const size_t size,
const size_t alignment, int fd, bool exec, MEMFLAGS flag) {
const size_t alignment, int fd, bool exec) {
char* base;
// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid mapping over something
@ -164,19 +163,19 @@ static char* reserve_memory(char* requested_address, const size_t size,
assert(is_aligned(requested_address, alignment),
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
p2i(requested_address), alignment);
base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec, flag);
base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
} else {
// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
base = map_or_reserve_memory(size, fd, exec, flag);
base = map_or_reserve_memory(size, fd, exec);
// Check alignment constraints. This is only needed when there is
// no requested address.
if (!is_aligned(base, alignment)) {
// Base not aligned, retry.
unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
// Map using the requested alignment.
base = map_or_reserve_memory_aligned(size, alignment, fd, exec, flag);
base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
}
}

@ -184,14 +183,14 @@ static char* reserve_memory(char* requested_address, const size_t size,
}

static char* reserve_memory_special(char* requested_address, const size_t size,
const size_t alignment, const size_t page_size, bool exec, MEMFLAGS flag) {
const size_t alignment, const size_t page_size, bool exec) {

log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
"alignment: " SIZE_FORMAT "%s",
byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec, flag);
char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
if (base != nullptr) {
// Check alignment constraints.
assert(is_aligned(base, alignment),
@ -203,19 +202,18 @@ static char* reserve_memory_special(char* requested_address, const size_t size,
}

void ReservedSpace::clear_members() {
initialize_members(nullptr, 0, 0, 0, false, false, mtNone);
initialize_members(nullptr, 0, 0, 0, false, false);
}

void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable, MEMFLAGS flag) {
size_t page_size, bool special, bool executable) {
_base = base;
_size = size;
_noaccess_prefix = 0;
_alignment = alignment;
_page_size = page_size;
_special = special;
_executable = executable;
_flag = flag;
_noaccess_prefix = 0;
}

void ReservedSpace::reserve(size_t size,
@ -237,9 +235,9 @@ void ReservedSpace::reserve(size_t size,
// When there is a backing file directory for this space then whether
// large pages are allocated is up to the filesystem of the backing file.
// So UseLargePages is not taken into account for this reservation.
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable, _flag);
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
if (base != nullptr) {
initialize_members(base, size, alignment, os::vm_page_size(), true, executable, _flag);
initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
}
// Always return, not possible to fall back to reservation not using a file.
return;
@ -252,10 +250,10 @@ void ReservedSpace::reserve(size_t size,
// explicit large pages and these have to be committed up front to ensure
// no reservations are lost.
do {
char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable, _flag);
char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
if (base != nullptr) {
// Successful reservation using large pages.
initialize_members(base, size, alignment, page_size, true, executable, _flag);
initialize_members(base, size, alignment, page_size, true, executable);
return;
}
page_size = os::page_sizes().next_smaller(page_size);
@ -268,10 +266,10 @@ void ReservedSpace::reserve(size_t size,
}

// == Case 3 ==
char* base = reserve_memory(requested_address, size, alignment, -1, executable, _flag);
char* base = reserve_memory(requested_address, size, alignment, -1, executable);
if (base != nullptr) {
// Successful mapping.
initialize_members(base, size, alignment, page_size, false, executable, _flag);
initialize_members(base, size, alignment, page_size, false, executable);
}
}

@ -279,8 +277,7 @@ void ReservedSpace::initialize(size_t size,
size_t alignment,
size_t page_size,
char* requested_address,
bool executable,
MEMFLAGS flag) {
bool executable) {
const size_t granularity = os::vm_allocation_granularity();
assert((size & (granularity - 1)) == 0,
"size not aligned to os::vm_allocation_granularity()");
@ -293,9 +290,6 @@ void ReservedSpace::initialize(size_t size,

clear_members();

// _flag is cleared in clear_members in above call
_flag = flag;

if (size == 0) {
return;
}
@ -316,14 +310,14 @@ void ReservedSpace::initialize(size_t size,

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable(), nmt_flag());
ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
return result;
}

ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
ReservedSpace result(base() + partition_size, size() - partition_size,
alignment, page_size(), special(), executable(), nmt_flag());
alignment, page_size(), special(), executable());
return result;
}

@ -366,12 +360,12 @@ void ReservedSpace::release() {

// Put a ReservedSpace over an existing range
ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable, MEMFLAGS flag) {
size_t page_size, bool special, bool executable) {
assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base");
assert(is_aligned(size, os::vm_page_size()), "Unaligned size");
assert(os::page_sizes().contains(page_size), "Invalid pagesize");
ReservedSpace space;
space.initialize_members(base, size, alignment, page_size, special, executable, flag);
space.initialize_members(base, size, alignment, page_size, special, executable);
return space;
}

@ -613,17 +607,16 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Last, desperate try without any placement.
if (_base == nullptr) {
log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
initialize(size + noaccess_prefix, alignment, page_size, nullptr, !ExecMem, nmt_flag());
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
}
}
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

if (size == 0) {
return;
}
// _flag is used internally by initialize_compressed_heap
_flag = mtJavaHeap;

if (heap_allocation_directory != nullptr) {
_fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
@ -651,7 +644,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
establish_noaccess_prefix();
}
} else {
initialize(size, alignment, page_size, nullptr, false, nmt_flag());
initialize(size, alignment, page_size, nullptr, false);
}

assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
@ -659,6 +652,9 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");

if (base() != nullptr) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}

if (_fd_for_heap != -1) {
::close(_fd_for_heap);
@ -674,7 +670,8 @@ MemRegion ReservedHeapSpace::region() const {
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
size_t rs_page_size) : ReservedSpace() {
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true, mtCode);
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace
@ -695,7 +692,6 @@ VirtualSpace::VirtualSpace() {
_upper_alignment = 0;
_special = false;
_executable = false;
_flag = mtNone;
}


@ -717,7 +713,6 @@ bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committe

_special = rs.special();
_executable = rs.executable();
_flag = rs.nmt_flag();

// When a VirtualSpace begins life at a large size, make all future expansion
// and shrinking occur aligned to a granularity of large pages. This avoids
@ -776,7 +771,6 @@ void VirtualSpace::release() {
_upper_alignment = 0;
_special = false;
_executable = false;
_flag = mtNone;
}


@ -842,8 +836,8 @@ static void pretouch_expanded_memory(void* start, void* end) {
os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable, MEMFLAGS flag) {
if (os::commit_memory(start, size, alignment, executable, flag)) {
static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
if (os::commit_memory(start, size, alignment, executable)) {
if (pre_touch || AlwaysPreTouch) {
pretouch_expanded_memory(start, start + size);
}
@ -932,7 +926,7 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
// Commit regions
if (lower_needs > 0) {
assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable, _flag)) {
if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
return false;
}
_lower_high += lower_needs;
@ -940,7 +934,7 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {

if (middle_needs > 0) {
assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable, _flag)) {
if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
return false;
}
_middle_high += middle_needs;
@ -948,7 +942,7 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {

if (upper_needs > 0) {
assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable, _flag)) {
if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
return false;
}
_upper_high += upper_needs;
@ -1020,7 +1014,7 @@ void VirtualSpace::shrink_by(size_t size) {
assert(middle_high_boundary() <= aligned_upper_new_high &&
aligned_upper_new_high + upper_needs <= upper_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable, _flag)) {
if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
return;
} else {
@ -1031,7 +1025,7 @@ void VirtualSpace::shrink_by(size_t size) {
assert(lower_high_boundary() <= aligned_middle_new_high &&
aligned_middle_new_high + middle_needs <= middle_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable, _flag)) {
if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
return;
} else {
@ -1042,7 +1036,7 @@ void VirtualSpace::shrink_by(size_t size) {
assert(low_boundary() <= aligned_lower_new_high &&
aligned_lower_new_high + lower_needs <= lower_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable, _flag)) {
if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
return;
} else {
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,20 +35,19 @@ class outputStream;
class ReservedSpace {
friend class VMStructs;
protected:
char* _base;
size_t _size;
size_t _noaccess_prefix;
size_t _alignment;
size_t _page_size;
int _fd_for_heap;
bool _special;
bool _executable;
MEMFLAGS _flag;
char* _base;
size_t _size;
size_t _noaccess_prefix;
size_t _alignment;
size_t _page_size;
bool _special;
int _fd_for_heap;
private:
bool _executable;

// ReservedSpace
ReservedSpace(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable, MEMFLAGS flag);
size_t page_size, bool special, bool executable);
protected:
// Helpers to clear and set members during initialization. Two members
// require special treatment:
@ -59,28 +58,25 @@ class ReservedSpace {
// 0 during initialization.
void clear_members();
void initialize_members(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable, MEMFLAGS flag);
size_t page_size, bool special, bool executable);

void initialize(size_t size, size_t alignment, size_t page_size,
char* requested_address, bool executable, MEMFLAGS flag);
char* requested_address, bool executable);

void reserve(size_t size, size_t alignment, size_t page_size,
char* requested_address, bool executable);
public:

MEMFLAGS nmt_flag() const { return _flag; }

// Constructor
ReservedSpace();
// Initialize the reserved space with the given size. Depending on the size
// a suitable page size and alignment will be used.
explicit ReservedSpace(size_t size, MEMFLAGS flag);
explicit ReservedSpace(size_t size);
// Initialize the reserved space with the given size. The preferred_page_size
// is used as the minimum page size/alignment. This may waste some space if
// the given size is not aligned to that value, as the reservation will be
// aligned up to the final alignment in this case.
ReservedSpace(size_t size, size_t preferred_page_size, MEMFLAGS flag);
ReservedSpace(size_t size, size_t alignment, size_t page_size, MEMFLAGS flag,
ReservedSpace(size_t size, size_t preferred_page_size);
ReservedSpace(size_t size, size_t alignment, size_t page_size,
char* requested_address = nullptr);

// Accessors
@ -116,7 +112,7 @@ class ReservedSpace {

// Put a ReservedSpace over an existing range
static ReservedSpace space_for_range(char* base, size_t size, size_t alignment,
size_t page_size, bool special, bool executable, MEMFLAGS flag);
size_t page_size, bool special, bool executable);
};

ReservedSpace ReservedSpace::first_part(size_t partition_size)
@ -183,8 +179,6 @@ class VirtualSpace {
// Need to know if commit should be executable.
bool _executable;

MEMFLAGS _flag;

// MPSS Support
// Each virtualspace region has a lower, middle, and upper region.
// Each region has an end boundary and a high pointer which is the
|
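For orientation, the public constructor set restored above, as a caller would exercise it; a hypothetical sketch (sizes illustrative, not from the patch):

// Sketch only: the three restored public ReservedSpace constructor forms.
ReservedSpace by_size(16 * M);                                  // size only; page size and alignment chosen internally
ReservedSpace by_page_size(16 * M, os::vm_page_size());         // minimum page size/alignment supplied
ReservedSpace placed(16 * M, os::vm_allocation_granularity(),   // explicit alignment and page size,
                     os::vm_page_size(), (char*)nullptr);       // plus an optional requested address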
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -120,7 +120,7 @@ class MemTracker : AllStatic {
// (we do not do any reservations before that).

static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag) {
MEMFLAGS flag = mtNone) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
@ -137,32 +137,32 @@ class MemTracker : AllStatic {
}
}

static inline void record_virtual_memory_uncommit(address addr, size_t size, MEMFLAGS flag) {
static inline void record_virtual_memory_uncommit(address addr, size_t size) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
VirtualMemoryTracker::remove_uncommitted_region((address)addr, size, flag);
VirtualMemoryTracker::remove_uncommitted_region((address)addr, size);
}
}

static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag) {
const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}

static inline void record_virtual_memory_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag) {
const NativeCallStack& stack) {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
ThreadCritical tc;
VirtualMemoryTracker::add_committed_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}

|
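The MemTracker side mirrors the split: commit and uncommit records no longer carry a memory type, while the reserve records keep an optional flag defaulting to mtNone. A hypothetical caller sketch (not from the patch):

// Sketch only: the type travels with the reservation, never with the commit.
MemTracker::record_virtual_memory_reserve(addr, bytes, CALLER_PC, mtGC); // explicit type at reserve time
MemTracker::record_virtual_memory_commit(addr, bytes, CALLER_PC);        // commit carries no type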
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -424,20 +424,20 @@ void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag)
if (reserved_rgn != nullptr) {
assert(reserved_rgn->contain_address(addr), "Containment");
if (reserved_rgn->flag() != flag) {
assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\") wants to change to \"%s\"",
NMTUtil::flag_to_name(reserved_rgn->flag()), NMTUtil::flag_to_name(flag));
assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
NMTUtil::flag_to_name(reserved_rgn->flag()));
reserved_rgn->set_flag(flag);
}
}
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag) {
const NativeCallStack& stack) {
assert(addr != nullptr, "Invalid address");
assert(size > 0, "Invalid size");
assert(_reserved_regions != nullptr, "Sanity check");

ReservedMemoryRegion rgn(addr, size, stack, flag);
ReservedMemoryRegion rgn(addr, size);
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

if (reserved_rgn == nullptr) {
@ -452,7 +452,7 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size, MEMFLAGS flag) {
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
assert(addr != nullptr, "Invalid address");
assert(size > 0, "Invalid size");
assert(_reserved_regions != nullptr, "Sanity check");
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -296,16 +296,13 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {

public:
ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
MEMFLAGS flag) :
MEMFLAGS flag = mtNone) :
VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { }


ReservedMemoryRegion(address base, size_t size) :
VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone) { }

ReservedMemoryRegion(address base, size_t size, MEMFLAGS flag) :
VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(flag) { }

// Copy constructor
ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
VirtualMemoryRegion(rr.base(), rr.size()) {
@ -382,10 +379,10 @@ class VirtualMemoryTracker : AllStatic {
public:
static bool initialize(NMT_TrackingLevel level);

static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone);

static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag);
static bool remove_uncommitted_region (address base_addr, size_t size, MEMFLAGS flag);
static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static bool remove_uncommitted_region (address base_addr, size_t size);
static bool remove_released_region (address base_addr, size_t size);
static bool remove_released_region (ReservedMemoryRegion* rgn);
static void set_reserved_region_type (address addr, MEMFLAGS flag);
|
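Taken together, the tracker reverts to a reserve-first model: a region may enter untyped and acquire its type later. A hypothetical sketch of that sequence (not from the patch):

// Sketch only: record a region as mtNone, then retype it once the consumer is known.
VirtualMemoryTracker::add_reserved_region(base, size, CALLER_PC); // MEMFLAGS defaults to mtNone
VirtualMemoryTracker::set_reserved_region_type(base, mtClass);    // overwrites mtNone only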
@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,7 +69,7 @@ void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t

char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
alignment = MAX2(Metaspace::reserve_alignment(), alignment);
return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr, mtMetaspace);
return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
}

char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Red Hat, Inc.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -2396,10 +2396,11 @@ static char* get_bad_address() {
static char* bad_address = nullptr;
if (bad_address == nullptr) {
size_t size = os::vm_allocation_granularity();
bad_address = os::reserve_memory(size, !ExecMem, mtInternal);
bad_address = os::reserve_memory(size);
if (bad_address != nullptr) {
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
/*is_committed*/false);
MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
}
}
return bad_address;
|
@ -697,21 +697,27 @@ WB_END

WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
addr = (jlong)(uintptr_t)os::reserve_memory(size, !ExecMem, mtTest);

addr = (jlong)(uintptr_t)os::reserve_memory(size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);

return addr;
WB_END

WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size, !ExecMem, mtTest);
addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);

return addr;
WB_END

WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem, mtTest);
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
WB_END

WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
os::uncommit_memory((char *)(uintptr_t)addr, size, !ExecMem, mtTest);
os::uncommit_memory((char *)(uintptr_t)addr, size);
WB_END

WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
@ -1480,7 +1486,7 @@ WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
static char c;
static volatile char* p;

p = os::reserve_memory(os::vm_allocation_granularity(), !ExecMem, mtTest);
p = os::reserve_memory(os::vm_allocation_granularity());
if (p == nullptr) {
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory");
}
|
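The WhiteBox entries above exercise the restored os-level convention end to end; condensed into a hypothetical sketch (error handling omitted, not from the patch):

// Sketch only: reserve/tag/commit/uncommit without MEMFLAGS, as in the WB_NMT* entries.
size_t sz = os::vm_allocation_granularity();
char* p = os::reserve_memory(sz);                            // no flag argument
MemTracker::record_virtual_memory_type((address)p, mtTest);  // NMT tag applied afterwards
os::commit_memory(p, sz, !ExecMem);                          // executable bit only
os::uncommit_memory(p, sz);                                  // flag parameter gone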
@ -1821,7 +1821,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
|
||||
}
|
||||
|
||||
char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
|
||||
char* result = pd_reserve_memory(bytes, executable, flags);
|
||||
char* result = pd_reserve_memory(bytes, executable);
|
||||
if (result != nullptr) {
|
||||
MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags);
|
||||
log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes));
|
||||
@ -1832,7 +1832,7 @@ char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
|
||||
}
|
||||
|
||||
char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MEMFLAGS flag) {
|
||||
char* result = SimulateFullAddressSpace ? nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable, flag);
|
||||
char* result = SimulateFullAddressSpace ? nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable);
|
||||
if (result != nullptr) {
|
||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, flag);
|
||||
log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes));
|
||||
@ -1879,7 +1879,7 @@ static void hemi_split(T* arr, unsigned num) {
|
||||
|
||||
// Given an address range [min, max), attempts to reserve memory within this area, with the given alignment.
|
||||
// If randomize is true, the location will be randomized.
|
||||
char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize, MEMFLAGS flag) {
|
||||
char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize) {
|
||||
|
||||
// Please keep the following constants in sync with the companion gtests:
|
||||
|
||||
@ -2017,7 +2017,7 @@ char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, siz
|
||||
const unsigned candidate_offset = points[i];
|
||||
char* const candidate = lo_att + candidate_offset * alignment_adjusted;
|
||||
assert(candidate <= hi_att, "Invalid offset %u (" ARGSFMT ")", candidate_offset, ARGSFMTARGS);
|
||||
result = SimulateFullAddressSpace ? nullptr : os::pd_attempt_reserve_memory_at(candidate, bytes, !ExecMem, flag);
|
||||
result = SimulateFullAddressSpace ? nullptr : os::pd_attempt_reserve_memory_at(candidate, bytes, false);
|
||||
if (!result) {
|
||||
log_trace(os, map)("Failed to attach at " PTR_FORMAT, p2i(candidate));
|
||||
}
|
||||
@ -2034,7 +2034,7 @@ char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, siz
|
||||
assert(is_aligned(result, alignment), "alignment invalid (" ERRFMT ")", ERRFMTARGS);
|
||||
log_trace(os, map)(ERRFMT, ERRFMTARGS);
|
||||
log_debug(os, map)("successfully attached at " PTR_FORMAT, p2i(result));
|
||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
|
||||
} else {
|
||||
log_debug(os, map)("failed to attach anywhere in [" PTR_FORMAT "-" PTR_FORMAT ")", p2i(min), p2i(max));
|
||||
}
|
||||
@ -2050,11 +2050,11 @@ static void assert_nonempty_range(const char* addr, size_t bytes) {
|
||||
p2i(addr), p2i(addr) + bytes);
|
||||
}
|
||||
|
||||
bool os::commit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flag) {
|
||||
bool os::commit_memory(char* addr, size_t bytes, bool executable) {
|
||||
assert_nonempty_range(addr, bytes);
|
||||
bool res = pd_commit_memory(addr, bytes, executable);
|
||||
if (res) {
|
||||
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
|
||||
log_debug(os, map)("Committed " RANGEFMT, RANGEFMTARGS(addr, bytes));
|
||||
} else {
|
||||
log_info(os, map)("Failed to commit " RANGEFMT, RANGEFMTARGS(addr, bytes));
|
||||
@ -2063,11 +2063,11 @@ bool os::commit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flag)
|
||||
}
|
||||
|
||||
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
|
||||
bool executable, MEMFLAGS flag) {
|
||||
bool executable) {
|
||||
assert_nonempty_range(addr, size);
|
||||
bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
|
||||
if (res) {
|
||||
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
|
||||
log_debug(os, map)("Committed " RANGEFMT, RANGEFMTARGS(addr, size));
|
||||
} else {
|
||||
log_info(os, map)("Failed to commit " RANGEFMT, RANGEFMTARGS(addr, size));
|
||||
@ -2076,27 +2076,27 @@ bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
|
||||
}
|
||||
|
||||
void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
|
||||
MEMFLAGS flag, const char* mesg) {
|
||||
const char* mesg) {
|
||||
assert_nonempty_range(addr, bytes);
|
||||
pd_commit_memory_or_exit(addr, bytes, executable, mesg);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
|
||||
}
|
||||
|
||||
void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
|
||||
bool executable, MEMFLAGS flag, const char* mesg) {
|
||||
bool executable, const char* mesg) {
|
||||
assert_nonempty_range(addr, size);
|
||||
os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
|
||||
}
|
||||
|
||||
bool os::uncommit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flag) {
|
||||
bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
|
||||
assert_nonempty_range(addr, bytes);
|
||||
bool res;
|
||||
if (MemTracker::enabled()) {
|
||||
ThreadCritical tc;
|
||||
res = pd_uncommit_memory(addr, bytes, executable);
|
||||
if (res) {
|
||||
MemTracker::record_virtual_memory_uncommit((address)addr, bytes, flag);
|
||||
MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
|
||||
}
|
||||
} else {
|
||||
res = pd_uncommit_memory(addr, bytes, executable);
|
||||
@ -2180,7 +2180,7 @@ char* os::map_memory_to_file(size_t bytes, int file_desc, MEMFLAGS flag) {
|
||||
}
|
||||
|
||||
char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MEMFLAGS flag) {
|
||||
char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc, flag);
|
||||
char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc);
|
||||
if (result != nullptr) {
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flag);
|
||||
}
|
||||
@ -2188,8 +2188,8 @@ char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc,
|
||||
}
|
||||
|
||||
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
|
||||
char *addr, size_t bytes, bool read_only,
|
||||
bool allow_exec, MEMFLAGS flags) {
|
||||
char *addr, size_t bytes, bool read_only,
|
||||
bool allow_exec, MEMFLAGS flags) {
|
||||
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
|
||||
if (result != nullptr) {
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
|
||||
@ -2211,8 +2211,8 @@ bool os::unmap_memory(char *addr, size_t bytes) {
|
||||
return result;
|
||||
}
|
||||
|
||||
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
|
||||
pd_free_memory(addr, bytes, alignment_hint, flag);
|
||||
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
|
||||
pd_free_memory(addr, bytes, alignment_hint);
|
||||
}
|
||||
|
||||
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
|
||||
@ -2220,14 +2220,14 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
|
||||
}
|
||||
|
||||
char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size,
|
||||
char* addr, bool executable, MEMFLAGS flag) {
|
||||
char* addr, bool executable) {
|
||||
|
||||
assert(is_aligned(addr, alignment), "Unaligned request address");
|
||||
|
||||
char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable, flag);
|
||||
char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable);
|
||||
if (result != nullptr) {
|
||||
// The memory is committed
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC, flag);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC);
|
||||
log_debug(os, map)("Reserved and committed " RANGEFMT, RANGEFMTARGS(result, size));
|
||||
} else {
|
||||
log_info(os, map)("Reserve and commit failed (%zu bytes)", size);
|
||||
|
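Taken together, these os.cpp hunks restore wrappers whose NMT bookkeeping no longer takes a per-call MEMFLAGS tag; a caller that wants a specific tag records it after reserving. A minimal caller-side sketch of the restored idiom, assuming an illustrative 1 M size and the mtInternal tag (it mirrors the initialize_assert_poison and SafepointMechanism hunks further down):

    // Sketch only: reserve untagged, tag explicitly, then commit/uncommit
    // passing just the executable flag.
    char* p = os::reserve_memory(1 * M);              // flags default to mtNone
    if (p != nullptr) {
      MemTracker::record_virtual_memory_type((address)p, mtInternal);
      if (os::commit_memory(p, 1 * M, false /* executable */)) {
        // ... use the memory ...
        os::uncommit_memory(p, 1 * M, false /* executable */);
      }
      os::release_memory(p, 1 * M);
    }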
@ -208,9 +208,9 @@ class os: AllStatic {
  // low enough to leave most of the valuable low-4gb address space open.
  static constexpr size_t _vm_min_address_default = 16 * M;

  static char* pd_reserve_memory(size_t bytes, bool executable, MEMFLAGS flag);
  static char* pd_reserve_memory(size_t bytes, bool executable);

  static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MEMFLAGS flag);
  static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool executable);

  static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
  static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
@ -225,13 +225,13 @@ class os: AllStatic {
  static bool pd_uncommit_memory(char* addr, size_t bytes, bool executable);
  static bool pd_release_memory(char* addr, size_t bytes);

  static char* pd_attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MEMFLAGS flag);
  static char* pd_attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc);

  static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only = false,
                             bool allow_exec = false);
                             char *addr, size_t bytes, bool read_only = false,
                             bool allow_exec = false);
  static bool pd_unmap_memory(char *addr, size_t bytes);
  static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag);
  static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  // Returns 0 if pretouch is done via platform dependent method, or otherwise
@ -239,7 +239,8 @@ class os: AllStatic {
  static size_t pd_pretouch_memory(void* first, void* last, size_t page_size);

  static char* pd_reserve_memory_special(size_t size, size_t alignment, size_t page_size,
                                         char* addr, bool executable, MEMFLAGS flag);

                                         char* addr, bool executable);
  static bool pd_release_memory_special(char* addr, size_t bytes);

  static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
@ -447,30 +448,30 @@ class os: AllStatic {
  inline static size_t cds_core_region_alignment();

  // Reserves virtual memory.
  static char* reserve_memory(size_t bytes, bool executable, MEMFLAGS flags);
  static char* reserve_memory(size_t bytes, bool executable = false, MEMFLAGS flags = mtNone);

  // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
  static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable, MEMFLAGS flag);
  static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false);

  // Attempts to reserve the virtual memory at [addr, addr + bytes).
  // Does not overwrite existing mappings.
  static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MEMFLAGS flag);
  static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MEMFLAGS flag = mtNone);

  // Given an address range [min, max), attempts to reserve memory within this area, with the given alignment.
  // If randomize is true, the location will be randomized.
  static char* attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize, MEMFLAGS flag);
  static char* attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize);

  static bool commit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flag);
  static bool commit_memory(char* addr, size_t bytes, bool executable);
  static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
                            bool executable, MEMFLAGS flag);
                            bool executable);
  // Same as commit_memory() that either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void commit_memory_or_exit(char* addr, size_t bytes,
                                    bool executable, MEMFLAGS flag, const char* mesg);
                                    bool executable, const char* mesg);
  static void commit_memory_or_exit(char* addr, size_t size,
                                    size_t alignment_hint,
                                    bool executable, MEMFLAGS flag, const char* mesg);
  static bool uncommit_memory(char* addr, size_t bytes, bool executable, MEMFLAGS flag);
                                    bool executable, const char* mesg);
  static bool uncommit_memory(char* addr, size_t bytes, bool executable = false);
  static bool release_memory(char* addr, size_t bytes);

  // Does the platform support trimming the native heap?
@ -506,18 +507,18 @@ class os: AllStatic {
  static int create_file_for_heap(const char* dir);
  // Map memory to the file referred by fd. This function is slightly different from map_memory()
  // and is added to be used for implementation of -XX:AllocateHeapAt
  static char* map_memory_to_file(size_t size, int fd, MEMFLAGS flag);
  static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag);
  static char* map_memory_to_file(size_t size, int fd, MEMFLAGS flag = mtNone);
  static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag = mtNone);
  static char* map_memory_to_file(char* base, size_t size, int fd);
  static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MEMFLAGS flag);
  static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MEMFLAGS flag = mtNone);
  // Replace existing reserved memory with file mapping
  static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);

  static char* map_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec, MEMFLAGS flag);
                          char *addr, size_t bytes, bool read_only = false,
                          bool allow_exec = false, MEMFLAGS flags = mtNone);
  static bool unmap_memory(char *addr, size_t bytes);
  static void free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag);
  static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  // NUMA-specific interface
@ -539,7 +540,7 @@ class os: AllStatic {
  static char* non_memory_address_word();
  // reserve, commit and pin the entire memory region
  static char* reserve_memory_special(size_t size, size_t alignment, size_t page_size,
                                      char* addr, bool executable, MEMFLAGS flag);
                                      char* addr, bool executable);
  static bool release_memory_special(char* addr, size_t bytes);
  static void large_page_init();
  static size_t large_page_size();
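With the defaults restored in these os.hpp declarations, call sites compile with or without explicit NMT arguments. A hedged sketch of both accepted forms (the 64 K size and mtTest tag are illustrative):

    char* a = os::reserve_memory(64 * K);                // executable = false, flags = mtNone
    char* b = os::reserve_memory(64 * K, false, mtTest); // an explicit tag is still accepted
    if (a != nullptr) os::release_memory(a, 64 * K);
    if (b != nullptr) os::release_memory(b, 64 * K);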
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -58,8 +58,9 @@ void SafepointMechanism::default_initialize() {
  // Polling page
  const size_t page_size = os::vm_page_size();
  const size_t allocation_size = 2 * page_size;
  char* polling_page = os::reserve_memory(allocation_size, !ExecMem, mtSafepoint);
  os::commit_memory_or_exit(polling_page, allocation_size, !ExecMem, mtSafepoint, "Unable to commit Safepoint polling page");
  char* polling_page = os::reserve_memory(allocation_size);
  os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
  MemTracker::record_virtual_memory_type((address)polling_page, mtSafepoint);

  char* bad_page = polling_page;
  char* good_page = polling_page + page_size;
@ -709,9 +709,10 @@ struct TestMultipleStaticAssertFormsInClassScope {
static ucontext_t g_stored_assertion_context;

void initialize_assert_poison() {
  char* page = os::reserve_memory(os::vm_page_size(), !ExecMem, mtInternal);
  char* page = os::reserve_memory(os::vm_page_size());
  if (page) {
    if (os::commit_memory(page, os::vm_page_size(), !ExecMem, mtInternal) &&
    MemTracker::record_virtual_memory_type(page, mtInternal);
    if (os::commit_memory(page, os::vm_page_size(), false) &&
        os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
      g_assert_poison = page;
    }
@ -50,7 +50,7 @@ TEST_OTHER_VM(FreeRegionList, length) {
  // the BOT.
  size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size());
  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
  ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()), mtTest);
  ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()));
  G1RegionToSpaceMapper* bot_storage =
    G1RegionToSpaceMapper::create_mapper(bot_rs,
                                         bot_rs.size(),
@ -81,7 +81,7 @@ TEST_VM(G1RegionToSpaceMapper, smallStressAdjacent) {
  size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
  size_t page_size = os::vm_page_size();

  ReservedSpace rs(size, os::vm_page_size(), mtTest);
  ReservedSpace rs(size, os::vm_page_size());

  G1RegionToSpaceMapper* small_mapper =
    G1RegionToSpaceMapper::create_mapper(rs,
@ -105,7 +105,7 @@ TEST_VM(G1RegionToSpaceMapper, largeStressAdjacent) {
  size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
  size_t page_size = os::vm_page_size();

  ReservedSpace rs(size, page_size, mtTest);
  ReservedSpace rs(size, page_size);

  G1RegionToSpaceMapper* large_mapper =
    G1RegionToSpaceMapper::create_mapper(rs,
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ public:
    const size_t increment = MAX2(align_up(unused / 100, ZGranuleSize), ZGranuleSize);

    for (uintptr_t start = 0; start + ZGranuleSize <= ZAddressOffsetMax; start += increment) {
      char* const reserved = os::attempt_reserve_memory_at((char*)ZAddressHeapBase + start, ZGranuleSize, !ExecMem /* executable */, mtTest);
      char* const reserved = os::attempt_reserve_memory_at((char*)ZAddressHeapBase + start, ZGranuleSize, false /* executable */);
      if (reserved != nullptr) {
        // Success
        return reserved;
@ -100,7 +100,7 @@ public:

    _reserved = reserved;

    os::commit_memory((char*)_reserved, ZGranuleSize, !ExecMem /* executable */, mtTest);
    os::commit_memory((char*)_reserved, ZGranuleSize, false /* executable */);

    _page_offset = uintptr_t(_reserved) - ZAddressHeapBase;
  }
@ -111,7 +111,7 @@ public:
    ZGeneration::_old = _old_old;
    ZGeneration::_young = _old_young;
    if (_reserved != nullptr) {
      os::uncommit_memory((char*)_reserved, ZGranuleSize, !ExecMem, mtTest);
      os::uncommit_memory((char*)_reserved, ZGranuleSize, false /* executable */);
      os::release_memory((char*)_reserved, ZGranuleSize);
    }
  }
@ -64,7 +64,7 @@ namespace {
static void test_reserved_size(size_t size) {
  ASSERT_PRED2(is_size_aligned, size, os::vm_allocation_granularity());

  ReservedSpace rs(size, mtTest);
  ReservedSpace rs(size);
  MemoryReleaser releaser(&rs);

  EXPECT_TRUE(rs.base() != nullptr) << "rs.special: " << rs.special();
@ -78,7 +78,7 @@ namespace {
static void test_reserved_size_alignment(size_t size, size_t alignment) {
  ASSERT_PRED2(is_size_aligned, size, alignment) << "Incorrect input parameters";
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  ReservedSpace rs(size, alignment, page_size, mtTest, (char *) nullptr);
  ReservedSpace rs(size, alignment, page_size, (char *) nullptr);

  ASSERT_TRUE(rs.base() != nullptr) << "rs.special = " << rs.special();
  ASSERT_EQ(size, rs.size()) << "rs.special = " << rs.special();
@ -106,7 +106,7 @@ namespace {
  bool large = maybe_large && UseLargePages && size >= os::large_page_size();
  size_t page_size = large ? os::large_page_size() : os::vm_page_size();

  ReservedSpace rs(size, alignment, page_size, mtTest);
  ReservedSpace rs(size, alignment, page_size);
  MemoryReleaser releaser(&rs);

  EXPECT_TRUE(rs.base() != nullptr) << "rs.special: " << rs.special();
@ -215,13 +215,12 @@ namespace {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned, mtTest);
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           os::vm_page_size(),
                           mtTest);
                           os::vm_page_size());
  }
}

@ -300,7 +299,7 @@ TEST_VM(VirtualSpace, actual_committed_space_one_large_page) {

  size_t large_page_size = os::large_page_size();

  ReservedSpace reserved(large_page_size, large_page_size, large_page_size, mtTest);
  ReservedSpace reserved(large_page_size, large_page_size, large_page_size);
  ReservedSpaceReleaser releaser(&reserved);
  ASSERT_TRUE(reserved.is_reserved());

@ -370,7 +369,6 @@ class TestReservedSpace : AllStatic {
    ReservedSpace rs(size, // size
                     alignment, // alignment
                     page_size, // page size
                     mtTest, // NMT MEM Flag
                     (char *)nullptr); // requested_address

    EXPECT_TRUE(rs.base() != nullptr);
@ -389,7 +387,7 @@ class TestReservedSpace : AllStatic {
  static void test_reserved_space2(size_t size) {
    ASSERT_TRUE(is_aligned(size, os::vm_allocation_granularity())) << "Must be at least AG aligned";

    ReservedSpace rs(size, mtTest);
    ReservedSpace rs(size);

    EXPECT_TRUE(rs.base() != nullptr);
    EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@ -414,7 +412,7 @@ class TestReservedSpace : AllStatic {
    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
    size_t page_size = large ? os::large_page_size() : os::vm_page_size();

    ReservedSpace rs(size, alignment, page_size, mtTest);
    ReservedSpace rs(size, alignment, page_size);

    EXPECT_TRUE(rs.base() != nullptr);
    EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@ -518,12 +516,12 @@ class TestVirtualSpace : AllStatic {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned, mtTest);
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           os::vm_page_size(), mtTest);
                           os::vm_page_size());
  }
}

@ -578,7 +576,7 @@ class TestVirtualSpace : AllStatic {

  size_t large_page_size = os::large_page_size();

  ReservedSpace reserved(large_page_size, large_page_size, large_page_size, mtTest);
  ReservedSpace reserved(large_page_size, large_page_size, large_page_size);

  EXPECT_TRUE(reserved.is_reserved());
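The ReservedSpace changes above all consist of dropping the trailing NMT tag from the constructor calls. A hedged sketch of the tag-free constructor shapes these tests now exercise (the 1 M size is illustrative):

    ReservedSpace rs1(1 * M);                                   // size only
    ReservedSpace rs2(1 * M, os::vm_allocation_granularity(),
                      os::vm_page_size());                      // size, alignment, page size
    ReservedSpace rs3(1 * M, os::vm_allocation_granularity(),
                      os::vm_page_size(), (char*)nullptr);      // ... plus requested address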
@ -114,7 +114,7 @@ TEST_VM(NMT, DISABLED_location_printing_cheap_dead_7) { test_for_dead_c_heap_blo
#endif

static void test_for_mmap(size_t sz, ssize_t offset) {
  char* addr = os::reserve_memory(sz, !ExecMem, mtTest);
  char* addr = os::reserve_memory(sz, false, mtTest);
  if (MemTracker::enabled()) {
    test_pointer(addr + offset, true, "in mmap'd memory region");
  } else {
@ -93,7 +93,7 @@ public:
    const size_t page_sz = os::vm_page_size();
    const size_t size = num_pages * page_sz;
    char* base = os::reserve_memory(size, !ExecMem, mtThreadStack);
    bool result = os::commit_memory(base, size, !ExecMem, mtThreadStack);
    bool result = os::commit_memory(base, size, !ExecMem);
    size_t index;
    ASSERT_NE(base, (char*)nullptr);
    for (index = 0; index < touch_pages; index ++) {
@ -132,7 +132,7 @@ public:
    }

    // Cleanup
    os::free_memory(base, size, page_sz, mtThreadStack);
    os::free_memory(base, size, page_sz);
    VirtualMemoryTracker::remove_released_region((address)base, size);

    rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
@ -162,7 +162,7 @@ public:
    const size_t size = num_pages * page_sz;
    char* base = os::reserve_memory(size, !ExecMem, mtTest);
    ASSERT_NE(base, (char*)nullptr);
    result = os::commit_memory(base, size, !ExecMem, mtTest);
    result = os::commit_memory(base, size, !ExecMem);

    ASSERT_TRUE(result);
    // touch all pages
@ -367,7 +367,7 @@ TEST_VM(os, jio_snprintf) {
static inline bool can_reserve_executable_memory(void) {
  bool executable = true;
  size_t len = 128;
  char* p = os::reserve_memory(len, executable, mtTest);
  char* p = os::reserve_memory(len, executable);
  bool exec_supported = (p != nullptr);
  if (exec_supported) {
    os::release_memory(p, len);
@ -405,7 +405,7 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
  for (int tries = 0; tries < 256 && p == nullptr; tries ++) {
    size_t total_range_len = num_stripes * stripe_len;
    // Reserve a large contiguous area to get the address space...
    p = (address)os::reserve_memory(total_range_len, !ExecMem, mtTest);
    p = (address)os::reserve_memory(total_range_len);
    EXPECT_NE(p, (address)nullptr);
    // .. release it...
    EXPECT_TRUE(os::release_memory((char*)p, total_range_len));
@ -419,14 +419,14 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
#else
      const bool executable = stripe % 2 == 0;
#endif
      q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, executable, mtTest);
      q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, executable);
      if (q == nullptr) {
        // Someone grabbed that area concurrently. Cleanup, then retry.
        tty->print_cr("reserve_multiple: retry (%d)...", stripe);
        carefully_release_multiple(p, stripe, stripe_len);
        p = nullptr;
      } else {
        EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, executable, mtTest));
        EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, executable));
      }
    }
  }
@ -439,12 +439,12 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
static address reserve_one_commit_multiple(int num_stripes, size_t stripe_len) {
  assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");
  size_t total_range_len = num_stripes * stripe_len;
  address p = (address)os::reserve_memory(total_range_len, !ExecMem, mtTest);
  address p = (address)os::reserve_memory(total_range_len);
  EXPECT_NE(p, (address)nullptr);
  for (int stripe = 0; stripe < num_stripes; stripe++) {
    address q = p + (stripe * stripe_len);
    if (stripe % 2 == 0) {
      EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, !ExecMem, mtTest));
      EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, false));
    }
  }
  return p;
@ -506,7 +506,7 @@ TEST_VM(os, release_multi_mappings) {
  PRINT_MAPPINGS("B");

  // ...re-reserve the middle stripes. This should work unless release silently failed.
  address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len, !ExecMem, mtTest);
  address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len);

  ASSERT_EQ(p2, p_middle_stripes);

@ -529,7 +529,7 @@ TEST_VM_ASSERT_MSG(os, release_bad_ranges, ".*bad release") {
#else
TEST_VM(os, release_bad_ranges) {
#endif
  char* p = os::reserve_memory(4 * M, !ExecMem, mtTest);
  char* p = os::reserve_memory(4 * M);
  ASSERT_NE(p, (char*)nullptr);
  // Release part of range
  ASSERT_FALSE(os::release_memory(p, M));
@ -564,7 +564,7 @@ TEST_VM(os, release_one_mapping_multi_commits) {

  // // make things even more difficult by trying to reserve at the border of the region
  address border = p + num_stripes * stripe_len;
  address p2 = (address)os::attempt_reserve_memory_at((char*)border, stripe_len, !ExecMem, mtTest);
  address p2 = (address)os::attempt_reserve_memory_at((char*)border, stripe_len);
  PRINT_MAPPINGS("B");

  ASSERT_TRUE(p2 == nullptr || p2 == border);
@ -605,9 +605,9 @@ TEST_VM(os, show_mappings_small_range) {
TEST_VM(os, show_mappings_full_range) {
  // Reserve a small range and fill it with a marker string, should show up
  // on implementations displaying range snippets
  char* p = os::reserve_memory(1 * M, !ExecMem, mtInternal);
  char* p = os::reserve_memory(1 * M, false, mtInternal);
  if (p != nullptr) {
    if (os::commit_memory(p, 1 * M, !ExecMem, mtTest)) {
    if (os::commit_memory(p, 1 * M, false)) {
      strcpy(p, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    }
  }
@ -629,7 +629,7 @@ TEST_VM(os, find_mapping_simple) {

  // A simple allocation
  {
    address p = (address)os::reserve_memory(total_range_len, !ExecMem, mtTest);
    address p = (address)os::reserve_memory(total_range_len);
    ASSERT_NE(p, (address)nullptr);
    PRINT_MAPPINGS("A");
    for (size_t offset = 0; offset < total_range_len; offset += 4711) {
@ -934,9 +934,9 @@ TEST_VM(os, open_O_CLOEXEC) {
}

TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_smallpages) {
  char* p1 = os::reserve_memory(M, !ExecMem, mtTest);
  char* p1 = os::reserve_memory(M, false, mtTest);
  ASSERT_NE(p1, nullptr);
  char* p2 = os::attempt_reserve_memory_at(p1, M, !ExecMem, mtTest);
  char* p2 = os::attempt_reserve_memory_at(p1, M);
  ASSERT_EQ(p2, nullptr); // should have failed
  os::release_memory(p1, M);
}
@ -944,9 +944,9 @@ TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_smallpages) {
TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_largepages) {
  if (UseLargePages && !os::can_commit_large_page_memory()) { // aka special
    const size_t lpsz = os::large_page_size();
    char* p1 = os::reserve_memory_aligned(lpsz, lpsz, !ExecMem, mtTest);
    char* p1 = os::reserve_memory_aligned(lpsz, lpsz, false);
    ASSERT_NE(p1, nullptr);
    char* p2 = os::reserve_memory_special(lpsz, lpsz, lpsz, p1, !ExecMem, mtTest);
    char* p2 = os::reserve_memory_special(lpsz, lpsz, lpsz, p1, false);
    ASSERT_EQ(p2, nullptr); // should have failed
    os::release_memory(p1, M);
  } else {
@ -958,9 +958,9 @@ TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_largepages) {
// On Aix, we should fail attach attempts not aligned to segment boundaries (256m)
TEST_VM(os, aix_reserve_at_non_shmlba_aligned_address) {
  if (Use64KPages) {
    char* p = os::attempt_reserve_memory_at((char*)0x1f00000, M, !ExecMem, mtTest);
    char* p = os::attempt_reserve_memory_at((char*)0x1f00000, M);
    ASSERT_EQ(p, nullptr); // should have failed
    p = os::attempt_reserve_memory_at((char*)((64 * G) + M), M, !ExecMem, mtTest);
    p = os::attempt_reserve_memory_at((char*)((64 * G) + M), M);
    ASSERT_EQ(p, nullptr); // should have failed
  }
}
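Both wish-address tests above pin down the same contract: attempt_reserve_memory_at() returns nullptr instead of replacing an existing mapping. A hedged restatement of that contract (M-sized mapping, illustrative):

    char* p1 = os::reserve_memory(M);                // occupy some address space
    char* p2 = os::attempt_reserve_memory_at(p1, M); // same range: must fail,
                                                     // mappings are never overwritten
    assert(p2 == nullptr, "mapping must not be replaced");
    os::release_memory(p1, M);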
@ -54,7 +54,7 @@ namespace {
  const size_t _size;
public:
  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
    return os::reserve_memory_special(bytes, alignment, page_size, req_addr, exec, mtTest);
    return os::reserve_memory_special(bytes, alignment, page_size, req_addr, exec);
  }
  HugeTlbfsMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
  ~HugeTlbfsMemory() {
@ -224,7 +224,7 @@ class TestReserveMemorySpecial : AllStatic {
    if (!using_explicit_hugepages()) {
      return;
    }
    char* addr = os::reserve_memory_special(size, alignment, page_size, nullptr, !ExecMem, mtTest);
    char* addr = os::reserve_memory_special(size, alignment, page_size, nullptr, false);
    if (addr != nullptr) {
      small_page_write(addr, size);
      os::release_memory_special(addr, size);
@ -281,7 +281,7 @@ class TestReserveMemorySpecial : AllStatic {
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* p = os::reserve_memory_special(size, alignment, lp, nullptr, !ExecMem, mtTest);
        char* p = os::reserve_memory_special(size, alignment, lp, nullptr, false);
        if (p != nullptr) {
          EXPECT_TRUE(is_aligned(p, alignment));
          small_page_write(p, size);
@ -296,7 +296,7 @@ class TestReserveMemorySpecial : AllStatic {
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        // req_addr must be at least large page aligned.
        char* const req_addr = align_up(mapping1, MAX2(alignment, lp));
        char* p = os::reserve_memory_special(size, alignment, lp, req_addr, !ExecMem, mtTest);
        char* p = os::reserve_memory_special(size, alignment, lp, req_addr, false);
        if (p != nullptr) {
          EXPECT_EQ(p, req_addr);
          small_page_write(p, size);
@ -311,7 +311,7 @@ class TestReserveMemorySpecial : AllStatic {
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        // req_addr must be at least large page aligned.
        char* const req_addr = align_up(mapping2, MAX2(alignment, lp));
        char* p = os::reserve_memory_special(size, alignment, lp, req_addr, !ExecMem, mtTest);
        char* p = os::reserve_memory_special(size, alignment, lp, req_addr, false);
        // as the area around req_addr contains already existing mappings, the API should always
        // return nullptr (as per contract, it cannot return another address)
        EXPECT_TRUE(p == nullptr);
@ -355,9 +355,9 @@ TEST_VM(os_linux, pretouch_thp_and_use_concurrent) {
  const size_t size = 1 * G;
  const bool useThp = UseTransparentHugePages;
  UseTransparentHugePages = true;
  char* const heap = os::reserve_memory(size, !ExecMem, mtInternal);
  char* const heap = os::reserve_memory(size, false, mtInternal);
  EXPECT_NE(heap, nullptr);
  EXPECT_TRUE(os::commit_memory(heap, size, !ExecMem, mtInternal));
  EXPECT_TRUE(os::commit_memory(heap, size, false));

  {
    auto pretouch = [heap, size](Thread*, int) {
@ -379,7 +379,7 @@ TEST_VM(os_linux, pretouch_thp_and_use_concurrent) {
  for (int i = 0; i < 1000; i++)
    EXPECT_EQ(*iptr++, i);

  EXPECT_TRUE(os::uncommit_memory(heap, size, !ExecMem, mtInternal));
  EXPECT_TRUE(os::uncommit_memory(heap, size, false));
  EXPECT_TRUE(os::release_memory(heap, size));
  UseTransparentHugePages = useThp;
}
@ -66,7 +66,7 @@ static size_t allocation_granularity() {
  << " bytes: " << bytes << " alignment: " << alignment << " randomized: " << randomized

static char* call_attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomized) {
  char* const addr = os::attempt_reserve_memory_between(min, max, bytes, alignment, randomized, mtTest);
  char* const addr = os::attempt_reserve_memory_between(min, max, bytes, alignment, randomized);
  if (addr != nullptr) {
    EXPECT_TRUE(is_aligned(addr, alignment)) << ERRINFO;
    EXPECT_TRUE(is_aligned(addr, allocation_granularity())) << ERRINFO;
@ -158,7 +158,7 @@ public:
      // the hole.
      const uintptr_t candidate = nth_bit(i);
      if ((candidate + _len) <= ARMB_constants::absolute_max) {
        _base = os::attempt_reserve_memory_at((char*)candidate, _len, !ExecMem, mtTest);
        _base = os::attempt_reserve_memory_at((char*)candidate, _len);
      }
    }
    if (_base == nullptr) {
@ -166,8 +166,8 @@ public:
    }
    // Release total mapping, remap the individual non-holy parts
    os::release_memory(_base, _len);
    _p1 = os::attempt_reserve_memory_at(_base + _p1_offset, _p1_size, !ExecMem, mtTest);
    _p2 = os::attempt_reserve_memory_at(_base + _p2_offset, _p2_size, !ExecMem, mtTest);
    _p1 = os::attempt_reserve_memory_at(_base + _p1_offset, _p1_size);
    _p2 = os::attempt_reserve_memory_at(_base + _p2_offset, _p2_size);
    if (_p1 == nullptr || _p2 == nullptr) {
      return false;
    }
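These helpers drive os::attempt_reserve_memory_between(), which after the backout takes only the range, size, alignment, and randomization flag. A hedged usage sketch (the [4 G, 8 G) range and sizes are illustrative):

    // Try to place 1 M somewhere in [4 G, 8 G), 64 K aligned, at a randomized spot.
    char* p = os::attempt_reserve_memory_between((char*)(4 * G), (char*)(8 * G),
                                                 1 * M, 64 * K, true /* randomize */);
    if (p != nullptr) {
      os::release_memory(p, 1 * M); // nullptr just means no fitting hole was found
    }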
@ -67,7 +67,7 @@ void TestReserveMemorySpecial_test() {
  FLAG_SET_CMDLINE(UseNUMAInterleaving, false);

  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), os::large_page_size(), nullptr, !ExecMem, mtTest);
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), os::large_page_size(), nullptr, false);
  if (result == nullptr) {
    // failed to allocate memory, skipping the test
    return;
@ -77,12 +77,12 @@ void TestReserveMemorySpecial_test() {
  // Reserve another page within the recently allocated memory area. This should fail
  const size_t expected_allocation_size = os::large_page_size();
  char* expected_location = result + os::large_page_size();
  char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), os::large_page_size(), expected_location, !ExecMem, mtTest);
  char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), os::large_page_size(), expected_location, false);
  EXPECT_TRUE(actual_location == nullptr) << "Should not be allowed to reserve within present reservation";

  // Instead try reserving after the first reservation.
  expected_location = result + large_allocation_size;
  actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), os::large_page_size(), expected_location, !ExecMem, mtTest);
  actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), os::large_page_size(), expected_location, false);
  EXPECT_TRUE(actual_location != nullptr) << "Unexpected reservation failure, can't verify correct location";
  EXPECT_TRUE(actual_location == expected_location) << "Reservation must be at requested location";
  MemoryReleaser m2(actual_location, os::large_page_size());
@ -90,7 +90,7 @@ void TestReserveMemorySpecial_test() {
  // Now try to do a reservation with a larger alignment.
  const size_t alignment = os::large_page_size() * 2;
  const size_t new_large_size = alignment * 4;
  char* aligned_request = os::reserve_memory_special(new_large_size, alignment, os::large_page_size(), nullptr, !ExecMem, mtTest);
  char* aligned_request = os::reserve_memory_special(new_large_size, alignment, os::large_page_size(), nullptr, false);
  EXPECT_TRUE(aligned_request != nullptr) << "Unexpected reservation failure, can't verify correct alignment";
  EXPECT_TRUE(is_aligned(aligned_request, alignment)) << "Returned address must be aligned";
  MemoryReleaser m3(aligned_request, new_large_size);
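reserve_memory_special() hands back memory that is already committed and pinned (per the os.hpp comment above), so the matching teardown is release_memory_special() rather than uncommit plus release. A hedged sketch of the pairing (the four-large-page size is illustrative):

    const size_t lp = os::large_page_size();
    char* p = os::reserve_memory_special(4 * lp, lp, lp, nullptr, false /* executable */);
    if (p != nullptr) {
      // committed on return; usable immediately
      os::release_memory_special(p, 4 * lp);
    }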
@ -93,7 +93,7 @@ public:
  static void test_add_committed_region_adjacent() {

    size_t size = 0x01000000;
    ReservedSpace rs(size, mtTest);
    ReservedSpace rs(size);
    address addr = (address)rs.base();

    address frame1 = (address)0x1234;
@ -167,7 +167,7 @@ public:
  static void test_add_committed_region_adjacent_overlapping() {

    size_t size = 0x01000000;
    ReservedSpace rs(size, mtTest);
    ReservedSpace rs(size);
    address addr = (address)rs.base();

    address frame1 = (address)0x1234;
@ -254,7 +254,7 @@ public:
  static void test_add_committed_region_overlapping() {

    size_t size = 0x01000000;
    ReservedSpace rs(size, mtTest);
    ReservedSpace rs(size);
    address addr = (address)rs.base();

    address frame1 = (address)0x1234;
@ -425,7 +425,7 @@ public:
  static void test_remove_uncommitted_region() {

    size_t size = 0x01000000;
    ReservedSpace rs(size, mtTest);
    ReservedSpace rs(size);
    address addr = (address)rs.base();

    address frame1 = (address)0x1234;