8330144: Revise os::free_memory()

Reviewed-by: stuefe, mbaesken
This commit is contained in:
Robert Toyonaga 2024-07-18 13:35:32 +00:00 committed by Thomas Stuefe
parent 35df48e1b3
commit 4a73ed44f1
10 changed files with 38 additions and 20 deletions

View File

@@ -1846,7 +1846,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
void os::pd_disclaim_memory(char *addr, size_t bytes) {
}
size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {

View File

@@ -1684,7 +1684,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
void os::pd_disclaim_memory(char *addr, size_t bytes) {
::madvise(addr, bytes, MADV_DONTNEED);
}

View File

@@ -3035,15 +3035,10 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
}
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
// This method works by doing an mmap over an existing mmaping and effectively discarding
// the existing pages. However it won't work for SHM-based large pages that cannot be
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
// Hints to the OS that the memory is no longer needed and may be reclaimed by the OS when convenient.
// The memory will be re-acquired on touch without needing explicit recommitting.
void os::pd_disclaim_memory(char *addr, size_t bytes) {
::madvise(addr, bytes, MADV_DONTNEED);
}
size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {

View File

@@ -3893,7 +3893,7 @@ bool os::unguard_memory(char* addr, size_t bytes) {
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_disclaim_memory(char *addr, size_t bytes) { }
size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;

View File

@@ -213,7 +213,7 @@ void MutableNUMASpace::bias_region(MemRegion mr, uint lgrp_id) {
// Then we uncommit the pages in the range.
// The alignment_hint argument must be less than or equal to the small page
// size if not using large pages or else this function does nothing.
os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align);
os::disclaim_memory((char*)aligned_region.start(), aligned_region.byte_size());
// And make them local/first-touch biased.
os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), checked_cast<int>(lgrp_id));
}

View File

@@ -53,7 +53,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_s
size_t size = pointer_delta(end, start, sizeof(char));
if (clear_space) {
// Prefer page reallocation to migration.
os::free_memory((char*)start, size, page_size);
os::disclaim_memory((char*)start, size);
}
os::numa_make_global((char*)start, size);
}

View File

@@ -2271,8 +2271,8 @@ bool os::unmap_memory(char *addr, size_t bytes) {
return result;
}
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
pd_free_memory(addr, bytes, alignment_hint);
void os::disclaim_memory(char *addr, size_t bytes) {
pd_disclaim_memory(addr, bytes);
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {

View File

@@ -231,7 +231,7 @@ class os: AllStatic {
char *addr, size_t bytes, bool read_only = false,
bool allow_exec = false);
static bool pd_unmap_memory(char *addr, size_t bytes);
static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
static void pd_disclaim_memory(char *addr, size_t bytes);
static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
// Returns 0 if pretouch is done via platform dependent method, or otherwise
@@ -520,7 +520,7 @@ class os: AllStatic {
char *addr, size_t bytes, bool read_only = false,
bool allow_exec = false, MEMFLAGS flags = mtNone);
static bool unmap_memory(char *addr, size_t bytes);
static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
static void disclaim_memory(char *addr, size_t bytes);
static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
// NUMA-specific interface

View File

@@ -132,7 +132,7 @@ public:
}
// Cleanup
os::free_memory(base, size, page_sz);
os::disclaim_memory(base, size);
VirtualMemoryTracker::remove_released_region((address)base, size);
rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));

View File

@@ -20,7 +20,6 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
@@ -977,3 +976,27 @@ TEST_VM(os, vm_min_address) {
#endif
}
#if !defined(_WINDOWS) && !defined(_AIX)
// Verifies the contract of os::disclaim_memory(): it may hint to the OS that
// the physical pages backing the range can be reclaimed, but the range stays
// committed and must be writable again afterwards with no explicit recommit.
// (The enclosing #if excludes Windows and AIX, where this diff leaves
// pd_disclaim_memory as a no-op or unimplemented — see hunks above.)
TEST_VM(os, free_without_uncommit) {
const size_t page_sz = os::vm_page_size();
const size_t pages = 64;
const size_t size = pages * page_sz;
// Reserve and commit a 64-page range, then touch every page so the OS
// actually backs the range with physical memory before we disclaim it.
char* base = os::reserve_memory(size, false, mtTest);
ASSERT_NE(base, (char*) nullptr);
ASSERT_TRUE(os::commit_memory(base, size, false));
for (size_t index = 0; index < pages; index++) {
base[index * page_sz] = 'a';
}
os::disclaim_memory(base, size);
// Ensure we can still use the memory without having to recommit.
for (size_t index = 0; index < pages; index++) {
base[index * page_sz] = 'a';
}
os::release_memory(base, size);
}
#endif