8253650: Cleanup: remove alignment_hint parameter from os::reserve_memory
Reviewed-by: stefank, tschatzl
commit 44e6820c37 (parent ed62b0104f)
Files changed under: src/hotspot/os, src/hotspot/share, test/hotspot/gtest/runtime
@@ -1925,21 +1925,10 @@ static void vmembk_print_on(outputStream* os) {
 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
 // address. Failing that, it will attach the memory anywhere.
 // If <requested_addr> is NULL, function will attach the memory anywhere.
-//
-// <alignment_hint> is being ignored by this function. It is very probable however that the
-// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
-// Should this be not enogh, we can put more work into it.
-static char* reserve_shmated_memory (
-  size_t bytes,
-  char* requested_addr,
-  size_t alignment_hint) {
+static char* reserve_shmated_memory (size_t bytes, char* requested_addr) {
 
   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
-             PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
-             bytes, p2i(requested_addr), alignment_hint);
-
-  // Either give me wish address or wish alignment but not both.
-  assert0(!(requested_addr != NULL && alignment_hint != 0));
+             PTR_FORMAT "...", bytes, p2i(requested_addr));
 
   // We must prevent anyone from attaching too close to the
   // BRK because that may cause malloc OOM.
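The deleted comment above captures why the hint was dead weight on this path: System V segments already attach on 256M boundaries, so any plausible alignment request is satisfied for free. For readers unfamiliar with that path, here is a standalone sketch (not HotSpot code; the name is invented) of reserving memory the way reserve_shmated_memory() does:

    // Sketch only: System V shared-memory reservation, as on the AIX shmat path.
    #include <stddef.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    static char* reserve_via_shm(size_t bytes, char* requested_addr) {
      // Create an anonymous segment; the OS rounds the size up as needed.
      int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | 0600);
      if (shmid == -1) return NULL;
      // Attach at the wish address if given, anywhere otherwise.
      char* addr = (char*) shmat(shmid, requested_addr, 0);
      // Mark for deletion now; the segment stays mapped until shmdt().
      shmctl(shmid, IPC_RMID, NULL);
      return (addr == (char*)-1) ? NULL : addr;
    }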
@@ -2061,15 +2050,10 @@ static bool uncommit_shmated_memory(char* addr, size_t size) {
 // Reserve memory via mmap.
 // If <requested_addr> is given, an attempt is made to attach at the given address.
 // Failing that, memory is allocated at any address.
-// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
-// allocate at an address aligned with the given alignment. Failing that, memory
-// is aligned anywhere.
-static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
-  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
-             "alignment_hint " UINTX_FORMAT "...",
-             bytes, p2i(requested_addr), alignment_hint);
+static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
+  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT "...",
+             bytes, p2i(requested_addr));
 
   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", p2i(requested_addr));
     return NULL;
@@ -2084,26 +2068,21 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
     requested_addr = NULL;
   }
 
-  // Specify one or the other but not both.
-  assert0(!(requested_addr != NULL && alignment_hint > 0));
-
-  // In 64K mode, we claim the global page size (os::vm_page_size())
-  // is 64K. This is one of the few points where that illusion may
-  // break, because mmap() will always return memory aligned to 4K. So
-  // we must ensure we only ever return memory aligned to 64k.
-  if (alignment_hint) {
-    alignment_hint = lcm(alignment_hint, os::vm_page_size());
-  } else {
-    alignment_hint = os::vm_page_size();
-  }
+  // In 64K mode, we lie and claim the global page size (os::vm_page_size()) is 64K
+  // (complicated story). This mostly works just fine since 64K is a multiple of the
+  // actual 4K lowest page size. Only at a few seams light shines thru, e.g. when
+  // calling mmap. mmap will return memory aligned to the lowest pages size - 4K -
+  // so we must make sure - transparently - that the caller only ever sees 64K
+  // aligned mapping start addresses.
+  const size_t alignment = os::vm_page_size();
 
   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
   const size_t size = align_up(bytes, os::vm_page_size());
 
   // alignment: Allocate memory large enough to include an aligned range of the right size and
   // cut off the leading and trailing waste pages.
-  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
-  const size_t extra_size = size + alignment_hint;
+  assert0(alignment != 0 && is_aligned_to(alignment, os::vm_page_size())); // see above
+  const size_t extra_size = size + alignment;
 
   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
@@ -2131,7 +2110,7 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
   }
 
   // Handle alignment.
-  char* const addr_aligned = align_up(addr, alignment_hint);
+  char* const addr_aligned = align_up(addr, alignment);
   const size_t waste_pre = addr_aligned - addr;
   char* const addr_aligned_end = addr_aligned + size;
   const size_t waste_post = extra_size - waste_pre - size;
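These two hunks are the heart of the AIX change: alignment is no longer a caller-supplied hint but simply os::vm_page_size(), while the mechanism stays the classic over-allocate-and-trim idiom. A standalone sketch of that idiom (not HotSpot code; it assumes a power-of-two alignment that is a multiple of the page size):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static char* mmap_aligned(size_t size, size_t alignment) {
      // Map enough to guarantee an aligned sub-range of the requested size.
      const size_t extra_size = size + alignment;
      char* addr = (char*) mmap(NULL, extra_size, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) return NULL;
      // Find the aligned start, then give the waste pages back to the OS.
      char* const aligned =
          (char*) (((uintptr_t)addr + alignment - 1) & ~(uintptr_t)(alignment - 1));
      const size_t waste_pre  = (size_t)(aligned - addr);
      const size_t waste_post = extra_size - waste_pre - size;
      if (waste_pre  > 0) munmap(addr, waste_pre);
      if (waste_post > 0) munmap(aligned + size, waste_post);
      return aligned;
    }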
@@ -2347,21 +2326,19 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info
 }
 
 // Reserves and attaches a shared memory segment.
-char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes) {
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
-  const size_t alignment_hint0 =
-    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
 
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
   if (os::vm_page_size() == 4*K) {
-    return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
+    return reserve_mmaped_memory(bytes, NULL /* requested_addr */);
   } else {
     if (bytes >= Use64KPagesThreshold) {
-      return reserve_shmated_memory(bytes, NULL /* requested_addr */, alignment_hint);
+      return reserve_shmated_memory(bytes, NULL /* requested_addr */);
     } else {
-      return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
+      return reserve_mmaped_memory(bytes, NULL /* requested_addr */);
     }
   }
 }
@@ -2538,7 +2515,7 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, int f
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
-  result = reserve_mmaped_memory(bytes, requested_addr, 0);
+  result = reserve_mmaped_memory(bytes, requested_addr);
 
   if (result != NULL) {
     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
@@ -2559,12 +2536,12 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
   if (os::vm_page_size() == 4*K) {
-    return reserve_mmaped_memory(bytes, requested_addr, 0);
+    return reserve_mmaped_memory(bytes, requested_addr);
   } else {
     if (bytes >= Use64KPagesThreshold) {
-      return reserve_shmated_memory(bytes, requested_addr, 0);
+      return reserve_shmated_memory(bytes, requested_addr);
    } else {
-      return reserve_mmaped_memory(bytes, requested_addr, 0);
+      return reserve_mmaped_memory(bytes, requested_addr);
    }
  }
 }
@@ -2030,8 +2030,7 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
-  // Ignores alignment hint
+char* os::pd_reserve_memory(size_t bytes) {
   return anon_mmap(NULL /* addr */, bytes);
 }
 
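On BSD (above) and Linux (below) the hint was already ignored, so only the signatures change. The anon_mmap() helper both delegate to boils down to roughly the following (a simplified sketch, not the exact HotSpot helper; the real ones add platform-specific flags):

    #include <stddef.h>
    #include <sys/mman.h>

    // Sketch of an anon_mmap()-style helper: reserve address space only.
    // PROT_NONE + MAP_NORESERVE means pages get committed later.
    static char* anon_mmap_sketch(char* requested_addr, size_t bytes) {
      char* addr = (char*) mmap(requested_addr, bytes, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      return (addr == MAP_FAILED) ? NULL : addr;
    }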
@@ -3694,8 +3694,7 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
-  // Ignores alignment hint
+char* os::pd_reserve_memory(size_t bytes) {
   return anon_mmap(NULL, bytes);
 }
 
@@ -316,17 +316,18 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
 
   char* extra_base;
   if (file_desc != -1) {
-    // For file mapping, we do not call os:reserve_memory(extra_size, NULL, alignment, file_desc) because
-    // we need to deal with shrinking of the file space later when we release extra memory after alignment.
-    // We also cannot called os:reserve_memory() with file_desc set to -1 because on aix we might get SHM memory.
-    // So here to call a helper function while reserve memory for us. After we have a aligned base,
-    // we will replace anonymous mapping with file mapping.
+    // For file mapping, we do not call os:reserve_memory_with_fd since:
+    // - we later chop away parts of the mapping using os::release_memory and that could fail if the
+    //   original mmap call had been tied to an fd.
+    // - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
+    //   mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
+    //   chopping off and unmapping excess bits back and front (see below) would not work.
    extra_base = reserve_mmapped_memory(extra_size, NULL);
    if (extra_base != NULL) {
      MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
    }
   } else {
-    extra_base = os::reserve_memory(extra_size, alignment);
+    extra_base = os::reserve_memory(extra_size);
   }
 
   if (extra_base == NULL) {
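The rewritten comment spells out why the fd branch keeps its own mmap-based helper instead of going through os::reserve_memory(). The companion step it alludes to, replacing the anonymous reservation with a file mapping once an aligned base is known, looks roughly like this (a sketch with an invented name, not the HotSpot helper):

    #include <stddef.h>
    #include <sys/mman.h>

    // MAP_FIXED replaces the pages already reserved at 'base' in place,
    // so the aligned address is kept while the backing becomes the file.
    static char* map_file_over_reservation(char* base, size_t size, int fd) {
      char* result = (char*) mmap(base, size, PROT_READ | PROT_WRITE,
                                  MAP_SHARED | MAP_FIXED, fd, 0);
      return (result == MAP_FAILED) ? NULL : result;
    }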
@@ -3086,7 +3086,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
   char* aligned_base = NULL;
 
   do {
-    char* extra_base = os::reserve_memory_with_fd(extra_size, alignment, file_desc);
+    char* extra_base = os::reserve_memory_with_fd(extra_size, file_desc);
     if (extra_base == NULL) {
       return NULL;
     }
@@ -3106,8 +3106,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
   return aligned_base;
 }
 
-char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
-  // Ignores alignment hint
+char* os::pd_reserve_memory(size_t bytes) {
   return pd_attempt_reserve_memory_at(NULL /* addr */, bytes);
 }
 
@@ -42,8 +42,7 @@ ZMarkStackSpace::ZMarkStackSpace() :
 
   // Reserve address space
   const size_t size = ZMarkStackSpaceLimit;
-  const size_t alignment = (size_t)os::vm_allocation_granularity();
-  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, alignment, mtGC);
+  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, mtGC);
   if (addr == 0) {
     log_error_pd(gc, marking)("Failed to reserve address space for mark stacks");
     return;
@@ -55,9 +55,8 @@ size_t MmapArrayAllocator<E>::size_for(size_t length) {
 template <class E>
 E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
-  int alignment = os::vm_allocation_granularity();
 
-  char* addr = os::reserve_memory(size, alignment, flags);
+  char* addr = os::reserve_memory(size, flags);
   if (addr == NULL) {
     return NULL;
   }
@@ -73,9 +72,8 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
 template <class E>
 E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
-  int alignment = os::vm_allocation_granularity();
 
-  char* addr = os::reserve_memory(size, alignment, flags);
+  char* addr = os::reserve_memory(size, flags);
   if (addr == NULL) {
     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
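MmapArrayAllocator callers see no difference; alignment is now entirely the OS layer's business. A hypothetical call site, purely for illustration (element type, length, and flag are invented):

    // Hypothetical usage, not part of this patch: reserve + commit a large
    // page-backed array, then hand it back.
    size_t length = 4 * M;  // element count, made up for the example
    uint* table = MmapArrayAllocator<uint>::allocate(length, mtGC);
    // ... use table[0 .. length - 1] ...
    MmapArrayAllocator<uint>::free(table, length);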
@@ -179,7 +179,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
   }
 
   if (base == NULL) {
-    // Optimistically assume that the OSes returns an aligned base pointer.
+    // Optimistically assume that the OS returns an aligned base pointer.
     // When reserving a large address range, most OSes seem to align to at
     // least 64K.
 
@@ -194,7 +194,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
       base = NULL;
     }
   } else {
-    base = os::reserve_memory_with_fd(size, alignment, _fd_for_heap);
+    base = os::reserve_memory_with_fd(size, _fd_for_heap);
   }
 
   if (base == NULL) return;
@@ -371,18 +371,14 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
     log_debug(gc, heap, coops)("Reserve regular memory without large pages");
   }
 
-  // Optimistically assume that the OSes returns an aligned base pointer.
-  // When reserving a large address range, most OSes seem to align to at
-  // least 64K.
-
-  // If the memory was requested at a particular address, use
-  // os::attempt_reserve_memory_at() to avoid over mapping something
-  // important. If available space is not detected, return NULL.
-
   if (requested_address != 0) {
     base = os::attempt_reserve_memory_at(requested_address, size, _fd_for_heap);
   } else {
-    base = os::reserve_memory_with_fd(size, alignment, _fd_for_heap);
+    // Optimistically assume that the OSes returns an aligned base pointer.
+    // When reserving a large address range, most OSes seem to align to at
+    // least 64K.
+    // If the returned memory is not aligned we will release and retry.
+    base = os::reserve_memory_with_fd(size, _fd_for_heap);
   }
 }
 if (base == NULL) { return; }
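Moving the comment into the else branch also documents the failure mode: the optimistic, hint-free reservation can come back misaligned, in which case it is released and retried. Condensed from the surrounding function (a paraphrase, not a verbatim quote):

    // If the optimistic reservation is misaligned, drop it; the caller
    // falls back to the explicit-alignment path and retries.
    if (base != NULL && !is_aligned(base, alignment)) {
      os::release_memory(base, size);
      base = NULL;
    }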
@@ -1652,8 +1652,8 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
   return os::pd_create_stack_guard_pages(addr, bytes);
 }
 
-char* os::reserve_memory(size_t bytes, size_t alignment_hint, MEMFLAGS flags) {
-  char* result = pd_reserve_memory(bytes, alignment_hint);
+char* os::reserve_memory(size_t bytes, MEMFLAGS flags) {
+  char* result = pd_reserve_memory(bytes);
   if (result != NULL) {
     MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
     if (flags != mtOther) {
@@ -1664,7 +1664,7 @@ char* os::reserve_memory(size_t bytes, size_t alignment_hint, MEMFLAGS flags) {
   return result;
 }
 
-char* os::reserve_memory_with_fd(size_t bytes, size_t alignment_hint, int file_desc) {
+char* os::reserve_memory_with_fd(size_t bytes, int file_desc) {
   char* result;
 
   if (file_desc != -1) {
@@ -1675,7 +1675,7 @@ char* os::reserve_memory_with_fd(size_t bytes, size_t alignment_hint, int file_d
       MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC);
     }
   } else {
-    result = pd_reserve_memory(bytes, alignment_hint);
+    result = pd_reserve_memory(bytes);
     if (result != NULL) {
       MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
     }
@@ -113,7 +113,7 @@ class os: AllStatic {
     _page_sizes[1] = 0; // sentinel
   }
 
-  static char* pd_reserve_memory(size_t bytes, size_t alignment_hint);
+  static char* pd_reserve_memory(size_t bytes);
 
   static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes);
   static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, int file_desc);
@@ -314,11 +314,11 @@ class os: AllStatic {
 
   // Reserves virtual memory.
-  // alignment_hint - currently only used by AIX
-  static char* reserve_memory(size_t bytes, size_t alignment_hint = 0, MEMFLAGS flags = mtOther);
+  static char* reserve_memory(size_t bytes, MEMFLAGS flags = mtOther);
 
   // Reserves virtual memory.
   // if file_desc != -1, also attaches the memory to the file.
-  static char* reserve_memory_with_fd(size_t bytes, size_t alignment_hint, int file_desc);
+  static char* reserve_memory_with_fd(size_t bytes, int file_desc);
 
   // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
   static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
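After the cleanup, os.hpp separates plain reservation from aligned reservation instead of overloading one call with a hint. Hypothetical call sites, for contrast (variable names are invented):

    char* plain   = os::reserve_memory(bytes, mtInternal);         // no hint anymore
    char* with_fd = os::reserve_memory_with_fd(bytes, file_desc);  // fd-backed
    char* aligned = os::reserve_memory_aligned(size, alignment);   // explicit alignment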
@@ -47,7 +47,7 @@ void SafepointMechanism::default_initialize() {
   // Polling page
   const size_t page_size = os::vm_page_size();
   const size_t allocation_size = 2 * page_size;
-  char* polling_page = os::reserve_memory(allocation_size, page_size);
+  char* polling_page = os::reserve_memory(allocation_size);
   os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
   MemTracker::record_virtual_memory_type((address)polling_page, mtSafepoint);
 
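For context on why the hint was pointless here: the two reserved pages become the safepoint polling pages, and page alignment, which os::reserve_memory() guarantees anyway, is all a poll needs. The neighboring code (paraphrased, not part of this hunk) then does roughly:

    // One protected page makes armed polls trap; one readable page keeps
    // disarmed polls cheap.
    char* bad_page  = polling_page;
    char* good_page = polling_page + page_size;
    os::protect_memory(bad_page,  page_size, os::MEM_PROT_NONE);
    os::protect_memory(good_page, page_size, os::MEM_PROT_READ);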
@@ -101,7 +101,7 @@ public:
   static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
     const size_t page_sz = os::vm_page_size();
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, page_sz, mtThreadStack);
+    char* base = os::reserve_memory(size, mtThreadStack);
     bool result = os::commit_memory(base, size, false);
     size_t index;
     ASSERT_NE(base, (char*)NULL);
@@ -169,7 +169,7 @@ public:
     const size_t page_sz = os::vm_page_size();
     const size_t num_pages = 4;
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, page_sz, mtTest);
+    char* base = os::reserve_memory(size, mtTest);
     ASSERT_NE(base, (char*)NULL);
     result = os::commit_memory(base, size, false);
 