8234930: Use MAP_JIT when allocating pages for code cache on macOS
Reviewed-by: stuefe, iklam, burban
commit 2273f9555a
parent da2415fed5
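Background: under the macOS Hardened Runtime, an anonymous mapping that will ever be both writable and executable must be created with the MAP_JIT flag, and such a mapping must not later be replaced by a plain mmap(MAP_FIXED), or the JIT property is lost. This change therefore threads an exec/executable flag from os::reserve_memory() down to the platform layer, so code cache pages are reserved with MAP_JIT on macOS and then committed/uncommitted in place. A minimal standalone sketch of the mapping pattern (not JDK code; plain POSIX plus the Apple-specific flag, and RWX may additionally require the com.apple.security.cs.allow-jit entitlement):

```c++
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t size = 4096;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__APPLE__)
  flags |= MAP_JIT;  // needed up front if the pages will ever be writable+executable
#endif
  // Reserve with PROT_NONE, as the JVM does; "commit" later via mprotect().
  void* p = mmap(NULL, size, PROT_NONE, flags, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  if (mprotect(p, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
    perror("mprotect");  // fails under the Hardened Runtime without MAP_JIT
  }
  munmap(p, size);
  return 0;
}
```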
@@ -1958,7 +1958,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
   pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
   assert(is_aligned_to(addr, os::vm_page_size()),
     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
     p2i(addr), os::vm_page_size());
@@ -2035,7 +2035,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
 }
 
 // Reserves and attaches a shared memory segment.
-char* os::pd_reserve_memory(size_t bytes) {
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
@@ -2236,7 +2236,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
 
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
   char* addr = NULL;
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
@@ -1678,12 +1678,25 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
 // problem.
 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-#ifdef __OpenBSD__
+#if defined(__OpenBSD__)
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
   if (::mprotect(addr, size, prot) == 0) {
     return true;
   }
+#elif defined(__APPLE__)
+  if (exec) {
+    // Do not replace MAP_JIT mappings, see JDK-8234930
+    if (::mprotect(addr, size, prot) == 0) {
+      return true;
+    }
+  } else {
+    uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
+                                       MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
+    if (res != (uintptr_t) MAP_FAILED) {
+      return true;
+    }
+  }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
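The new __APPLE__ branch above is the heart of the fix: committing executable pages with another mmap(MAP_FIXED) would silently replace the MAP_JIT mapping with an ordinary one, after which making the pages executable can fail under the Hardened Runtime. For executable memory the commit therefore just flips protections in place. A hedged sketch of the same logic as a standalone helper (hypothetical names; assumes a POSIX system):

```c++
#include <sys/mman.h>
#include <cstddef>

bool commit_pages(void* addr, size_t size, bool exec) {
  const int prot = exec ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                        : (PROT_READ | PROT_WRITE);
#if defined(__APPLE__)
  if (exec) {
    // Keep the original MAP_JIT mapping alive; only change protections.
    return ::mprotect(addr, size, prot) == 0;
  }
#endif
  // Elsewhere (and for non-exec pages), replacing the mapping is fine.
  void* res = ::mmap(addr, size, prot,
                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
  return res != MAP_FAILED;
}
```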
@@ -1766,11 +1779,22 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
 }
 
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
-#ifdef __OpenBSD__
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
+#if defined(__OpenBSD__)
   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
   return ::mprotect(addr, size, PROT_NONE) == 0;
+#elif defined(__APPLE__)
+  if (exec) {
+    if (::madvise(addr, size, MADV_FREE) != 0) {
+      return false;
+    }
+    return ::mprotect(addr, size, PROT_NONE) == 0;
+  } else {
+    uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
+                                       MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+    return res != (uintptr_t) MAP_FAILED;
+  }
 #else
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
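Uncommitting follows the same rule: the MAP_JIT reservation must stay in place, so physical pages are returned to the OS with madvise(MADV_FREE) and the range is fenced off with mprotect(PROT_NONE), instead of being overwritten by a fresh PROT_NONE mapping. A matching sketch (hypothetical helper, same assumptions as above):

```c++
#include <sys/mman.h>
#include <cstddef>

bool uncommit_pages(void* addr, size_t size, bool exec) {
#if defined(__APPLE__)
  if (exec) {
    if (::madvise(addr, size, MADV_FREE) != 0) {
      return false;  // pages stay committed; the caller may retry
    }
    // Make the range inaccessible without disturbing the MAP_JIT mapping.
    return ::mprotect(addr, size, PROT_NONE) == 0;
  }
#endif
  // Elsewhere, mapping fresh PROT_NONE pages over the range is the
  // conventional way to decommit.
  void* res = ::mmap(addr, size, PROT_NONE,
                     MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS,
                     -1, 0);
  return res != MAP_FAILED;
}
```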
@@ -1791,9 +1815,10 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
 // 'requested_addr' is only treated as a hint, the return value may or
 // may not start from the requested address. Unlike Bsd mmap(), this
 // function returns NULL to indicate failure.
-static char* anon_mmap(char* requested_addr, size_t bytes) {
+static char* anon_mmap(char* requested_addr, size_t bytes, bool exec) {
   // MAP_FIXED is intentionally left out, to leave existing mappings intact.
-  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
+  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS
+                    MACOS_ONLY(| (exec ? MAP_JIT : 0));
 
   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
   // touch an uncommitted page. Otherwise, the read/write might
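This hunk is where MAP_JIT actually enters the flag word. The MACOS_ONLY(...) macro expands to its argument on macOS builds and to nothing elsewhere, so the flag computation stays a single expression on every platform. A simplified version of the idiom (the real definition lives in HotSpot's platform macros, so treat this as an assumption):

```c++
// Simplified sketch of the HotSpot macro idiom:
#ifdef __APPLE__
  #define MACOS_ONLY(code) code
#else
  #define MACOS_ONLY(code)
#endif

// On macOS the expression expands to
//   MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS | (exec ? MAP_JIT : 0);
// on every other platform the MACOS_ONLY(...) part vanishes and the
// trailing semicolon still closes a well-formed statement.
```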
@@ -1807,8 +1832,8 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes) {
-  return anon_mmap(NULL /* addr */, bytes);
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
+  return anon_mmap(NULL /* addr */, bytes, exec);
 }
 
 bool os::pd_release_memory(char* addr, size_t size) {
@@ -1893,7 +1918,7 @@ bool os::can_execute_large_page_memory() {
 
 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
   assert(file_desc >= 0, "file_desc is not valid");
-  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes);
+  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
   if (result != NULL) {
     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -1905,7 +1930,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
   // Assert only that the size is a multiple of the page size, since
   // that's all that mmap requires, and since that's all we really know
   // about at this low abstraction level. If we need higher alignment,
@@ -1918,7 +1943,7 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
 
   // Bsd mmap allows caller to pass an address as hint; give it a try first,
   // if kernel honors the hint then we can return immediately.
-  char * addr = anon_mmap(requested_addr, bytes);
+  char * addr = anon_mmap(requested_addr, bytes, exec);
   if (addr == requested_addr) {
     return requested_addr;
   }
@@ -3272,7 +3272,7 @@ struct bitmask* os::Linux::_numa_nodes_ptr;
 struct bitmask* os::Linux::_numa_interleave_bitmask;
 struct bitmask* os::Linux::_numa_membind_bitmask;
 
-bool os::pd_uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res != (uintptr_t) MAP_FAILED;
@@ -3516,7 +3516,7 @@ static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
 
-char* os::pd_reserve_memory(size_t bytes) {
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
   return anon_mmap(NULL, bytes);
 }
 
@@ -4197,7 +4197,7 @@ bool os::can_execute_large_page_memory() {
 
 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
   assert(file_desc >= 0, "file_desc is not valid");
-  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes);
+  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
   if (result != NULL) {
     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -4209,7 +4209,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
+char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
   // Assert only that the size is a multiple of the page size, since
   // that's all that mmap requires, and since that's all we really know
   // about at this low abstraction level. If we need higher alignment,
@@ -336,9 +336,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
 // rather than unmapping and remapping the whole chunk to get requested alignment.
-char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
   size_t extra_size = calculate_aligned_extra_size(size, alignment);
-  char* extra_base = os::reserve_memory(extra_size);
+  char* extra_base = os::reserve_memory(extra_size, exec);
   if (extra_base == NULL) {
     return NULL;
   }
@@ -3262,7 +3262,8 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
   return aligned_base;
 }
 
-char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
+  // exec can be ignored
   return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
 }
 
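On Windows the new exec parameter is accepted but unused at reserve time ("exec can be ignored"), because VirtualAlloc takes the page protection when pages are committed, not when the address range is reserved. A standalone sketch of that two-step pattern (assumes Windows; not JDK code):

```c++
#include <windows.h>

int main() {
  const SIZE_T size = 1 << 16;
  // Reserve only: the protection argument is irrelevant until commit.
  void* base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL) return 1;
  // Commit with the protection the caller actually wants, e.g. executable.
  void* p = VirtualAlloc(base, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (p == NULL) return 1;
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}
```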
@@ -3270,13 +3271,13 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd) {
   return map_or_reserve_memory_aligned(size, alignment, fd);
 }
 
-char* os::pd_reserve_memory(size_t bytes) {
-  return pd_attempt_reserve_memory_at(NULL /* addr */, bytes);
+char* os::pd_reserve_memory(size_t bytes, bool exec) {
+  return pd_attempt_reserve_memory_at(NULL /* addr */, bytes, exec);
 }
 
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
-char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes) {
+char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_page_size() == 0, "reserve page size");
@@ -3471,7 +3472,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
   pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
-bool os::pd_uncommit_memory(char* addr, size_t bytes) {
+bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
     return true;
@@ -42,7 +42,7 @@ ZMarkStackSpace::ZMarkStackSpace() :
 
   // Reserve address space
   const size_t size = ZMarkStackSpaceLimit;
-  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, mtGC);
+  const uintptr_t addr = (uintptr_t)os::reserve_memory(size, !ExecMem, mtGC);
   if (addr == 0) {
     log_error_pd(gc, marking)("Failed to reserve address space for mark stacks");
     return;
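The !ExecMem argument appearing in these call sites is HotSpot's readable spelling of "not executable": a named boolean constant rather than a bare false, so the intent is visible at the call site. Sketch of the idiom (the actual definition lives in HotSpot's allocation header; shown here as an assumption):

```c++
// HotSpot-style readability constant: callers write !ExecMem for data
// allocations and ExecMem for code-cache allocations.
const bool ExecMem = true;
```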
@@ -56,7 +56,7 @@ template <class E>
 E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
 
-  char* addr = os::reserve_memory(size, flags);
+  char* addr = os::reserve_memory(size, !ExecMem, flags);
   if (addr == NULL) {
     return NULL;
   }
@@ -73,7 +73,7 @@ template <class E>
 E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
   size_t size = size_for(length);
 
-  char* addr = os::reserve_memory(size, flags);
+  char* addr = os::reserve_memory(size, !ExecMem, flags);
   if (addr == NULL) {
     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
@@ -81,27 +81,27 @@ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 }
 
 // Helper method
-static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd) {
+static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
   if (fd != -1) {
     return os::attempt_map_memory_to_file_at(base, size, fd);
   }
-  return os::attempt_reserve_memory_at(base, size);
+  return os::attempt_reserve_memory_at(base, size, executable);
 }
 
 // Helper method
-static char* map_or_reserve_memory(size_t size, int fd) {
+static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
   if (fd != -1) {
     return os::map_memory_to_file(size, fd);
   }
-  return os::reserve_memory(size);
+  return os::reserve_memory(size, executable);
 }
 
 // Helper method
-static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd) {
+static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
   if (fd != -1) {
     return os::map_memory_to_file_aligned(size, alignment, fd);
   }
-  return os::reserve_memory_aligned(size, alignment);
+  return os::reserve_memory_aligned(size, alignment, executable);
 }
 
 // Helper method
@@ -212,13 +212,13 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
   // important.  If available space is not detected, return NULL.
 
   if (requested_address != 0) {
-    base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap);
+    base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap, _executable);
     if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
       // OS ignored requested address. Try different address.
       base = NULL;
     }
   } else {
-    base = map_or_reserve_memory(size, _fd_for_heap);
+    base = map_or_reserve_memory(size, _fd_for_heap, _executable);
   }
 
   if (base == NULL) return;
@@ -230,7 +230,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 
     // Make sure that size is aligned
     size = align_up(size, alignment);
-    base = map_or_reserve_memory_aligned(size, alignment, _fd_for_heap);
+    base = map_or_reserve_memory_aligned(size, alignment, _fd_for_heap, _executable);
 
     if (requested_address != 0 &&
         failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
@@ -414,13 +414,13 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
   }
 
     if (requested_address != 0) {
-      base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap);
+      base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap, executable());
     } else {
      // Optimistically assume that the OSes returns an aligned base pointer.
      // When reserving a large address range, most OSes seem to align to at
      // least 64K.
      // If the returned memory is not aligned we will release and retry.
-      base = map_or_reserve_memory(size, _fd_for_heap);
+      base = map_or_reserve_memory(size, _fd_for_heap, executable());
    }
  }
  if (base == NULL) { return; }
@@ -1024,7 +1024,7 @@ void VirtualSpace::shrink_by(size_t size) {
     assert(middle_high_boundary() <= aligned_upper_new_high &&
            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
            "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
+    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
       debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
@@ -1035,7 +1035,7 @@ void VirtualSpace::shrink_by(size_t size) {
     assert(lower_high_boundary() <= aligned_middle_new_high &&
            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
            "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
+    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
       debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
@@ -1046,7 +1046,7 @@ void VirtualSpace::shrink_by(size_t size) {
     assert(low_boundary() <= aligned_lower_new_high &&
            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
            "must not shrink beyond region");
-    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
+    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
       debug_only(warning("os::uncommit_memory failed"));
       return;
     } else {
@@ -1645,8 +1645,8 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
   return os::pd_create_stack_guard_pages(addr, bytes);
 }
 
-char* os::reserve_memory(size_t bytes, MEMFLAGS flags) {
-  char* result = pd_reserve_memory(bytes);
+char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
+  char* result = pd_reserve_memory(bytes, executable);
   if (result != NULL) {
     MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
     if (flags != mtOther) {
@@ -1657,8 +1657,8 @@ char* os::reserve_memory(size_t bytes, MEMFLAGS flags) {
   return result;
 }
 
-char* os::attempt_reserve_memory_at(char* addr, size_t bytes) {
-  char* result = pd_attempt_reserve_memory_at(addr, bytes);
+char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable) {
+  char* result = pd_attempt_reserve_memory_at(addr, bytes, executable);
   if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   } else {
@@ -1697,16 +1697,16 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
   MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
 }
 
-bool os::uncommit_memory(char* addr, size_t bytes) {
+bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
   bool res;
   if (MemTracker::tracking_level() > NMT_minimal) {
     Tracker tkr(Tracker::uncommit);
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, executable);
     if (res) {
       tkr.record((address)addr, bytes);
     }
   } else {
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, executable);
   }
   return res;
 }
@@ -125,9 +125,9 @@ class os: AllStatic {
   static address _polling_page;
   static PageSizes _page_sizes;
 
-  static char* pd_reserve_memory(size_t bytes);
+  static char* pd_reserve_memory(size_t bytes, bool executable);
 
-  static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes);
+  static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool executable);
 
   static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
   static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
@@ -139,7 +139,7 @@ class os: AllStatic {
   static void pd_commit_memory_or_exit(char* addr, size_t size,
                                        size_t alignment_hint,
                                        bool executable, const char* mesg);
-  static bool pd_uncommit_memory(char* addr, size_t bytes);
+  static bool pd_uncommit_memory(char* addr, size_t bytes, bool executable);
   static bool pd_release_memory(char* addr, size_t bytes);
 
   static char* pd_attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc);
@@ -327,14 +327,14 @@ class os: AllStatic {
   static int vm_allocation_granularity();
 
   // Reserves virtual memory.
-  static char* reserve_memory(size_t bytes, MEMFLAGS flags = mtOther);
+  static char* reserve_memory(size_t bytes, bool executable = false, MEMFLAGS flags = mtOther);
 
   // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
-  static char* reserve_memory_aligned(size_t size, size_t alignment);
+  static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false);
 
   // Attempts to reserve the virtual memory at [addr, addr + bytes).
   // Does not overwrite existing mappings.
-  static char* attempt_reserve_memory_at(char* addr, size_t bytes);
+  static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false);
 
   // Split a reserved memory region [base, base+size) into two regions [base, base+split) and
   // [base+split, base+size).
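Taken together, these declarations default executable to false, so the bulk of existing callers compile unchanged; only code-cache-style allocations opt in. A hedged illustration of the intended call-site shape (JVM-internal API from the diff above, shown as a fragment rather than a runnable program; sizes and the mtCode tag are assumptions):

```c++
// Fragment only: os::* is HotSpot's internal API as declared above.
char* cache = os::reserve_memory(reserved_size, true /* executable */, mtCode);
// ... later, commit and uncommit must carry the matching flag so the
// macOS MAP_JIT paths are taken:
os::commit_memory_or_exit(cache, committed_size, alignment_hint,
                          true /* executable */, "code cache");
os::uncommit_memory(cache, committed_size, true /* executable */);
```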
@@ -356,7 +356,7 @@ class os: AllStatic {
   static void commit_memory_or_exit(char* addr, size_t size,
                                     size_t alignment_hint,
                                     bool executable, const char* mesg);
-  static bool uncommit_memory(char* addr, size_t bytes);
+  static bool uncommit_memory(char* addr, size_t bytes, bool executable = false);
   static bool release_memory(char* addr, size_t bytes);
 
   // A diagnostic function to print memory mappings in the given range.
@@ -101,8 +101,8 @@ public:
   static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
     const size_t page_sz = os::vm_page_size();
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, mtThreadStack);
-    bool result = os::commit_memory(base, size, false);
+    char* base = os::reserve_memory(size, !ExecMem, mtThreadStack);
+    bool result = os::commit_memory(base, size, !ExecMem);
     size_t index;
     ASSERT_NE(base, (char*)NULL);
     for (index = 0; index < touch_pages; index ++) {
@@ -169,9 +169,9 @@ public:
     const size_t page_sz = os::vm_page_size();
     const size_t num_pages = 4;
     const size_t size = num_pages * page_sz;
-    char* base = os::reserve_memory(size, mtTest);
+    char* base = os::reserve_memory(size, !ExecMem, mtTest);
     ASSERT_NE(base, (char*)NULL);
-    result = os::commit_memory(base, size, false);
+    result = os::commit_memory(base, size, !ExecMem);
 
     ASSERT_TRUE(result);
     // touch all pages