8253638: Cleanup os::reserve_memory and remove MAP_FIXED
Reviewed-by: stuefe, iklam
parent f014854ac7
commit 625a9352bf
@@ -1728,7 +1728,7 @@ static void local_sem_init() {
   } else {
     // Memory semaphores must live in shared mem.
     guarantee0(p_sig_msem == NULL);
-    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
+    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore));
     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
   }
@@ -2347,19 +2347,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
 }
 
-// Reserves and attaches a shared memory segment.
-// Will assert if a wish address is given and could not be obtained.
-char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
-
-  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
-  // thereby clobbering old mappings at that place. That is probably
-  // not intended, never used and almost certainly an error were it
-  // ever be used this way (to try attaching at a specified address
-  // without clobbering old mappings an alternate API exists,
-  // os::attempt_reserve_memory_at()).
-  // Instead of mimicking the dangerous coding of the other platforms, here I
-  // just ignore the request address (release) or assert(debug).
-  assert0(requested_addr == NULL);
-
+char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
   const size_t alignment_hint0 =
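The comment block deleted above is the rationale for the whole change, and the hazard it describes is easy to reproduce. A standalone POSIX sketch (a hypothetical demo, not HotSpot code) of the difference between MAP_FIXED and a plain address hint:

// Build with: g++ map_fixed_demo.cpp && ./a.out  (assumes Linux/BSD semantics)
#include <sys/mman.h>
#include <cstdio>

int main() {
  // First mapping: pretend this is live VM data.
  void* first = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  // With MAP_FIXED the kernel silently discards 'first' and maps over it.
  void* clobber = mmap(first, 4096, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  printf("MAP_FIXED returned %p (== first: old mapping destroyed)\n", clobber);

  // Without MAP_FIXED the address is only a hint; if it is occupied the
  // kernel picks a different one and existing mappings stay intact.
  void* hinted = mmap(clobber, 4096, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  printf("hint returned %p (likely != requested: nothing clobbered)\n", hinted);
  return 0;
}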
@@ -2368,12 +2356,12 @@ char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
   if (os::vm_page_size() == 4*K) {
-    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
+    return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
   } else {
     if (bytes >= Use64KPagesThreshold) {
-      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
+      return reserve_shmated_memory(bytes, NULL /* requested_addr */, alignment_hint);
     } else {
-      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
+      return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
     }
   }
 }
@@ -2011,27 +2011,17 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
   return os::uncommit_memory(addr, size);
 }
 
-// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
-// at 'requested_addr'. If there are existing memory mappings at the same
-// location, however, they will be overwritten. If 'fixed' is false,
-// 'requested_addr' is only treated as a hint, the return value may or
-// may not start from the requested address. Unlike Bsd mmap(), this
-// function returns NULL to indicate failure.
-static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
-  char * addr;
-  int flags;
-
-  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
-  if (fixed) {
-    assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
-    flags |= MAP_FIXED;
-  }
+// 'requested_addr' is only treated as a hint, the return value may or
+// may not start from the requested address. Unlike Bsd mmap(), this
+// function returns NULL to indicate failure.
+static char* anon_mmap(char* requested_addr, size_t bytes) {
+  // MAP_FIXED is intentionally left out, to leave existing mappings intact.
+  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
 
   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
   // touch an uncommitted page. Otherwise, the read/write might
   // succeed if we have enough swap space to back the physical page.
-  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
-                       flags, -1, 0);
+  char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
 
   return addr == MAP_FAILED ? NULL : addr;
 }
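The PROT_NONE comment kept in this hunk carries the key design point: reserving and committing are separate steps, and an untouched reservation should fault rather than silently consume swap. A minimal sketch of that reserve-then-commit split, assuming 4 KiB pages and plain mmap/mprotect (HotSpot's os::commit_memory does roughly this plus bookkeeping):

#include <sys/mman.h>
#include <cassert>

int main() {
  const size_t bytes = 1 << 20;
  // Reserve: PROT_NONE + MAP_NORESERVE means no swap is charged and any
  // touch faults immediately.
  char* base = (char*)mmap(NULL, bytes, PROT_NONE,
                           MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // Commit the first page by making it accessible (assumes 4 KiB pages).
  mprotect(base, 4096, PROT_READ | PROT_WRITE);
  base[0] = 1;       // fine: committed
  // base[8192] = 1; // would SIGSEGV: still PROT_NONE, fails early by design
  return 0;
}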
@@ -2040,9 +2030,9 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                            size_t alignment_hint) {
-  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
+char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
+  // Ignores alignment hint
+  return anon_mmap(NULL /* addr */, bytes);
 }
 
 bool os::pd_release_memory(char* addr, size_t size) {
@@ -2152,7 +2142,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
 
   // Bsd mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
-  char * addr = anon_mmap(requested_addr, bytes, false);
+  char * addr = anon_mmap(requested_addr, bytes);
   if (addr == requested_addr) {
     return requested_addr;
   }
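This hunk shows only the happy path of the attempt-at-address protocol. A self-contained sketch of the full pattern, simplified from the surrounding code (the real pd_attempt_reserve_memory_at adds fallback logic not shown here):

#include <sys/mman.h>
#include <cstddef>

// The hint variant of mmap never clobbers, so a wrong-address result must
// be unmapped and reported as failure to the caller.
static char* attempt_reserve_at(char* requested_addr, size_t bytes) {
  char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                             MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    return NULL;
  }
  if (addr == requested_addr) {
    return requested_addr;   // kernel honored the hint
  }
  ::munmap(addr, bytes);     // reserved, but at the wrong place: undo
  return NULL;               // caller treats this as "address not available"
}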
@@ -3638,27 +3638,17 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
   return os::uncommit_memory(addr, size);
 }
 
-// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
-// at 'requested_addr'. If there are existing memory mappings at the same
-// location, however, they will be overwritten. If 'fixed' is false,
-// 'requested_addr' is only treated as a hint, the return value may or
-// may not start from the requested address. Unlike Linux mmap(), this
-// function returns NULL to indicate failure.
-static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
-  char * addr;
-  int flags;
-
-  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
-  if (fixed) {
-    assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
-    flags |= MAP_FIXED;
-  }
+// 'requested_addr' is only treated as a hint, the return value may or
+// may not start from the requested address. Unlike Linux mmap(), this
+// function returns NULL to indicate failure.
+static char* anon_mmap(char* requested_addr, size_t bytes) {
+  // MAP_FIXED is intentionally left out, to leave existing mappings intact.
+  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
 
   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
   // touch an uncommitted page. Otherwise, the read/write might
   // succeed if we have enough swap space to back the physical page.
-  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
-                       flags, -1, 0);
+  char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
 
   return addr == MAP_FAILED ? NULL : addr;
 }
@@ -3671,19 +3661,14 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
 // It must be a multiple of allocation granularity.
 // Returns address of memory or NULL. If req_addr was not NULL, will only return
 // req_addr or NULL.
-static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
-
+static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) {
   size_t extra_size = bytes;
   if (req_addr == NULL && alignment > 0) {
     extra_size += alignment;
   }
 
-  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
-                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-                               -1, 0);
-  if (start == MAP_FAILED) {
-    start = NULL;
-  } else {
+  char* start = anon_mmap(req_addr, extra_size);
+  if (start != NULL) {
     if (req_addr != NULL) {
       if (start != req_addr) {
         ::munmap(start, extra_size);
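anon_mmap_aligned gets alignment out of an unaligned API by over-reserving and trimming. The same idea as a standalone sketch (hypothetical helper name; power-of-two alignment assumed):

#include <sys/mman.h>
#include <stdint.h>
#include <cstddef>

// Reserve 'bytes' aligned to 'alignment' by mapping extra space, then
// trimming the misaligned head and the unused tail with munmap.
static char* reserve_aligned(size_t bytes, size_t alignment) {
  const size_t extra = bytes + alignment;
  char* raw = (char*)mmap(NULL, extra, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (raw == MAP_FAILED) return NULL;

  char* aligned = (char*)(((uintptr_t)raw + alignment - 1) & ~(alignment - 1));
  if (aligned > raw) {
    munmap(raw, aligned - raw);                    // trim misaligned head
  }
  char* end = raw + extra;
  if (aligned + bytes < end) {
    munmap(aligned + bytes, end - (aligned + bytes)); // trim unused tail
  }
  return aligned;
}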
@@ -3709,9 +3694,9 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
-                            size_t alignment_hint) {
-  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
+char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
+  // Ignores alignment hint
+  return anon_mmap(NULL, bytes);
 }
 
 bool os::pd_release_memory(char* addr, size_t size) {
@@ -4069,7 +4054,7 @@ static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
   // To ensure that we get 'alignment' aligned memory from shmat,
   // we pre-reserve aligned virtual memory and then attach to that.
 
-  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
+  char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment);
   if (pre_reserved_addr == NULL) {
     // Couldn't pre-reserve aligned memory.
     shm_warning("Failed to pre-reserve aligned memory for shmat.");
@@ -4238,7 +4223,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
   assert(is_aligned(bytes, alignment), "Must be");
 
   // First reserve - but not commit - the address range in small pages.
-  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
+  char* const start = anon_mmap_aligned(req_addr, bytes, alignment);
 
   if (start == NULL) {
     return NULL;
@@ -4418,7 +4403,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
 
   // Linux mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
-  char * addr = anon_mmap(requested_addr, bytes, false);
+  char * addr = anon_mmap(requested_addr, bytes);
   if (addr == requested_addr) {
     return requested_addr;
   }
@@ -326,7 +326,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
     }
   } else {
-    extra_base = os::reserve_memory(extra_size, NULL, alignment);
+    extra_base = os::reserve_memory(extra_size, alignment);
   }
 
   if (extra_base == NULL) {
@@ -3063,8 +3063,8 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
 
   release_memory(base, size);
-  reserve_memory(split, base);
-  reserve_memory(size - split, split_address);
+  attempt_reserve_memory_at(split, base);
+  attempt_reserve_memory_at(size - split, split_address);
 
   // NMT: nothing to do here. Since Windows implements the split by
   // releasing and re-reserving memory, the parts are already registered
@@ -3086,7 +3086,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
   char* aligned_base = NULL;
 
   do {
-    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
+    char* extra_base = os::reserve_memory_with_fd(extra_size, alignment, file_desc);
     if (extra_base == NULL) {
       return NULL;
     }
@@ -3099,14 +3099,21 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
       os::release_memory(extra_base, extra_size);
     }
 
-    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
+    aligned_base = os::attempt_reserve_memory_at(size, aligned_base, file_desc);
 
   } while (aligned_base == NULL);
 
   return aligned_base;
 }
 
-char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
+  // Ignores alignment hint
+  return pd_attempt_reserve_memory_at(bytes, NULL /* addr */);
+}
+
+// Reserve memory at an arbitrary address, only if that area is
+// available (and not reserved for something else).
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* addr) {
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_page_size() == 0, "reserve page size");
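The do/while loop above exists because a VirtualAlloc reservation can only be released as a whole, so the head and tail cannot be trimmed the way the POSIX code does. Instead the over-sized range is released and re-reserved at the computed aligned address, and since another thread can grab that range in between, the whole sequence retries on failure. A plain Win32 sketch of that pattern (simplified, hypothetical helper name):

#include <windows.h>
#include <stdint.h>

static char* reserve_aligned_win(size_t size, size_t alignment) {
  for (;;) {
    // Over-reserve so an aligned address certainly exists inside the range.
    char* extra = (char*)VirtualAlloc(NULL, size + alignment,
                                      MEM_RESERVE, PAGE_NOACCESS);
    if (extra == NULL) return NULL;
    char* aligned = (char*)(((uintptr_t)extra + alignment - 1) & ~(alignment - 1));

    // MEM_RELEASE frees only whole reservations: no head/tail trimming here.
    VirtualFree(extra, 0, MEM_RELEASE);

    // Re-reserve at the aligned address; this can fail if another thread
    // mapped something into the gap in the meantime, hence the loop.
    char* res = (char*)VirtualAlloc(aligned, size, MEM_RESERVE, PAGE_NOACCESS);
    if (res != NULL) return res;
  }
}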
@@ -3137,14 +3144,6 @@ char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   return res;
 }
 
-// Reserve memory at an arbitrary address, only if that area is
-// available (and not reserved for something else).
-char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
-  // Windows os::reserve_memory() fails of the requested address range is
-  // not avilable.
-  return reserve_memory(bytes, requested_addr);
-}
-
 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
   assert(file_desc >= 0, "file_desc is not valid");
   return map_memory_to_file(requested_addr, bytes, file_desc);
@@ -791,7 +791,7 @@ bool os::is_allocatable(size_t bytes) {
     return true;
   }
 
-  char* addr = reserve_memory(bytes, NULL);
+  char* addr = reserve_memory(bytes);
 
   if (addr != NULL) {
     release_memory(addr, bytes);
@@ -247,7 +247,7 @@ bool os::is_allocatable(size_t bytes) {
     return true;
  }
 
-  char* addr = reserve_memory(bytes, NULL);
+  char* addr = reserve_memory(bytes);
 
   if (addr != NULL) {
     release_memory(addr, bytes);
@@ -650,7 +650,7 @@ bool os::is_allocatable(size_t bytes) {
     return true;
   }
 
-  char* addr = reserve_memory(bytes, NULL);
+  char* addr = reserve_memory(bytes);
 
   if (addr != NULL) {
     release_memory(addr, bytes);
@@ -273,7 +273,7 @@ bool os::is_allocatable(size_t bytes) {
     return true;
   }
 
-  char* addr = reserve_memory(bytes, NULL);
+  char* addr = reserve_memory(bytes);
 
   if (addr != NULL) {
     release_memory(addr, bytes);
@@ -43,7 +43,7 @@ ZMarkStackSpace::ZMarkStackSpace() :
   // Reserve address space
   const size_t size = ZMarkStackSpaceLimit;
   const size_t alignment = (size_t)os::vm_allocation_granularity();
-  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, NULL, alignment, mtGC);
+  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, alignment, mtGC);
   if (addr == 0) {
     log_error_pd(gc, marking)("Failed to reserve address space for mark stacks");
     return;
@@ -57,7 +57,7 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
   int alignment = os::vm_allocation_granularity();
 
-  char* addr = os::reserve_memory(size, NULL, alignment, flags);
+  char* addr = os::reserve_memory(size, alignment, flags);
   if (addr == NULL) {
     return NULL;
   }
@@ -75,7 +75,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
   int alignment = os::vm_allocation_granularity();
 
-  char* addr = os::reserve_memory(size, NULL, alignment, flags);
+  char* addr = os::reserve_memory(size, alignment, flags);
   if (addr == NULL) {
     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
@@ -194,7 +194,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
       base = NULL;
     }
   } else {
-    base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
+    base = os::reserve_memory_with_fd(size, alignment, _fd_for_heap);
   }
 
   if (base == NULL) return;
@@ -382,7 +382,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
     if (requested_address != 0) {
       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
     } else {
-      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
+      base = os::reserve_memory_with_fd(size, alignment, _fd_for_heap);
     }
   }
   if (base == NULL) { return; }
@@ -1473,7 +1473,7 @@ WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
   static char c;
   static volatile char* p;
 
-  p = os::reserve_memory(os::vm_allocation_granularity(), NULL, 0);
+  p = os::reserve_memory(os::vm_allocation_granularity());
   if (p == NULL) {
     THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory");
   }
@@ -1652,32 +1652,33 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
   return os::pd_create_stack_guard_pages(addr, bytes);
 }
 
-char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
-  char* result = NULL;
-
-  if (file_desc != -1) {
-    // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
-    // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
-    result = os::map_memory_to_file(addr, bytes, file_desc);
-    if (result != NULL) {
-      MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
-    }
-  } else {
-    result = pd_reserve_memory(bytes, addr, alignment_hint);
-    if (result != NULL) {
-      MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+char* os::reserve_memory(size_t bytes, size_t alignment_hint, MEMFLAGS flags) {
+  char* result = pd_reserve_memory(bytes, alignment_hint);
+  if (result != NULL) {
+    MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
+    if (flags != mtOther) {
+      MemTracker::record_virtual_memory_type(result, flags);
     }
   }
 
   return result;
 }
 
-char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
-                         MEMFLAGS flags) {
-  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
-  if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
-    MemTracker::record_virtual_memory_type((address)result, flags);
+char* os::reserve_memory_with_fd(size_t bytes, size_t alignment_hint, int file_desc) {
+  char* result;
+
+  if (file_desc != -1) {
+    // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
+    // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
+    result = os::map_memory_to_file(NULL /* addr */, bytes, file_desc);
+    if (result != NULL) {
+      MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC);
+    }
+  } else {
+    result = pd_reserve_memory(bytes, alignment_hint);
+    if (result != NULL) {
+      MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
+    }
  }
 
   return result;
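Taken together, the refactor replaces one overload set carrying an unused wish address with two narrower entry points, and uses mtOther as a "no NMT tag" default. A hedged summary of what call sites look like after this commit (an illustrative fragment against the new os.hpp; names and values are placeholders):

// Assumes HotSpot's os.hpp as changed by this commit.
static void reserve_examples(size_t size, size_t alignment, int fd, char* wish) {
  char* a = os::reserve_memory(size);                        // hint-free, untagged (mtOther)
  char* b = os::reserve_memory(size, alignment, mtGC);       // NMT-tagged reservation
  char* c = os::reserve_memory_with_fd(size, alignment, fd); // file-backed, also commits
  char* d = os::attempt_reserve_memory_at(size, wish);       // wish addresses live here now
}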
@@ -113,10 +113,11 @@ class os: AllStatic {
     _page_sizes[1] = 0; // sentinel
   }
 
-  static char* pd_reserve_memory(size_t bytes, char* addr = 0,
-                                 size_t alignment_hint = 0);
+  static char* pd_reserve_memory(size_t bytes, size_t alignment_hint);
+
   static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
   static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc);
+
   static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
   static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                                bool executable);
@@ -310,13 +311,21 @@ class os: AllStatic {
                                const size_t size);
 
   static int vm_allocation_granularity();
-  static char* reserve_memory(size_t bytes, char* addr = 0,
-                              size_t alignment_hint = 0, int file_desc = -1);
-  static char* reserve_memory(size_t bytes, char* addr,
-                              size_t alignment_hint, MEMFLAGS flags);
-  static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
-  static char* attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
+
+  // Reserves virtual memory.
+  // alignment_hint - currently only used by AIX
+  static char* reserve_memory(size_t bytes, size_t alignment_hint = 0, MEMFLAGS flags = mtOther);
+
+  // Reserves virtual memory.
+  // if file_desc != -1, also attaches the memory to the file.
+  static char* reserve_memory_with_fd(size_t bytes, size_t alignment_hint, int file_desc);
+
+  // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
+  static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
+
+  // Attempts to reserve the virtual memory at [addr, addr + bytes).
+  // Does not overwrite existing mappings.
+  static char* attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
 
   // Split a reserved memory region [base, base+size) into two regions [base, base+split) and
   // [base+split, base+size).
@@ -47,7 +47,7 @@ void SafepointMechanism::default_initialize() {
     // Polling page
     const size_t page_size = os::vm_page_size();
     const size_t allocation_size = 2 * page_size;
-    char* polling_page = os::reserve_memory(allocation_size, NULL, page_size);
+    char* polling_page = os::reserve_memory(allocation_size, page_size);
     os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
     MemTracker::record_virtual_memory_type((address)polling_page, mtSafepoint);
@@ -101,7 +101,7 @@ public:
   static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
     const size_t page_sz = os::vm_page_size();
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, NULL, page_sz, mtThreadStack);
+    char* base = os::reserve_memory(size, page_sz, mtThreadStack);
     bool result = os::commit_memory(base, size, false);
     size_t index;
     ASSERT_NE(base, (char*)NULL);
@@ -169,7 +169,7 @@ public:
     const size_t page_sz = os::vm_page_size();
     const size_t num_pages = 4;
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, NULL, page_sz, mtTest);
+    char* base = os::reserve_memory(size, page_sz, mtTest);
     ASSERT_NE(base, (char*)NULL);
     result = os::commit_memory(base, size, false);