Merge
commit 15464d7992
@@ -2325,7 +2325,9 @@ void os::large_page_init() {
 }
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -4752,3 +4754,8 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
   return n;
 }
 
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
@@ -2720,36 +2720,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
 
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-        (uintptr_t) ::mmap(addr, size, prot,
-                           MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                           -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-      // vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-      //                       "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2774,7 +2745,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
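Note: pd_realign_memory only advises the kernel; whether the range actually ends up backed by huge pages is up to the transparent-huge-page machinery. A minimal standalone sketch of the same call sequence, assuming a 2 MB huge page size and plain POSIX error handling (not VM code):

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  const size_t two_mb = 2 * 1024 * 1024;            // assumed huge page size
  // Reserve enough anonymous memory to contain one aligned 2 MB block.
  void* p = mmap(NULL, 2 * two_mb, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }

  // Align up to the huge page boundary before advising, as pd_realign_memory does.
  uintptr_t aligned = ((uintptr_t)p + two_mb - 1) & ~((uintptr_t)two_mb - 1);
  if (madvise((void*)aligned, two_mb, MADV_HUGEPAGE) != 0) {
    perror("madvise(MADV_HUGEPAGE)");               // e.g. kernel without THP support
  }

  munmap(p, 2 * two_mb);
  return 0;
}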
@@ -2787,7 +2758,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3157,9 +3128,29 @@ bool os::unguard_memory(char* addr, size_t size) {
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }
 
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                  -1, 0);
 
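The probe above maps two small pages' worth of memory so it can madvise a properly aligned range. A complementary, hedged sketch that merely inspects the sysfs knob; the path exists only on THP-capable kernels and is an assumption here:

#include <stdio.h>
#include <string.h>

// Returns 1 if transparent huge pages are available and not set to "never".
static int thp_enabled(void) {
  FILE* f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "r");
  if (f == NULL) return 0;                  // kernel built without THP
  char buf[128] = {0};
  fread(buf, 1, sizeof(buf) - 1, f);
  fclose(f);
  // The file looks like "always [madvise] never"; the bracketed word is active.
  return strstr(buf, "[never]") == NULL;
}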
@@ -3182,12 +3173,10 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
+    munmap(p, page_size);
   }
 
-  if (warn) {
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
 
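The same hugetlbfs availability check can be reproduced outside the VM. A sketch under the assumption of a 2 MB default huge page size; as with the VM's check, a successful mmap does not guarantee that the pages can later be faulted in if the administrator-configured pool (vm.nr_hugepages) is empty:

#include <sys/mman.h>
#include <stdio.h>

int main() {
  const size_t huge_sz = 2 * 1024 * 1024;   // assumed default huge page size
  void* p = mmap(NULL, huge_sz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) {
    perror("mmap(MAP_HUGETLB)");            // no pool configured, or kernel too old
    return 1;
  }
  munmap(p, huge_sz);
  printf("MAP_HUGETLB mapping succeeded\n");
  return 0;
}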
@@ -3235,26 +3224,9 @@ static void set_coredump_filter(void) {
 
 static size_t _large_page_size = 0;
 
-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
   // large_page_size on Linux is used to round up heap size. x86 uses either
   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
@@ -3269,7 +3241,7 @@ void os::large_page_init() {
   // the processor.
 
 #ifndef ZERO
-  _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO
 
@@ -3280,7 +3252,7 @@ void os::large_page_init() {
     char buf[16];
     if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
       if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-        _large_page_size = x * K;
+        large_page_size = x * K;
         break;
       }
     } else {
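The loop above scans /proc/meminfo until it hits the Hugepagesize line and expects the value in kB. A standalone sketch of the same parse, with simplified error handling:

#include <stdio.h>

// Returns the configured huge page size in bytes, or 0 if not found.
static size_t find_hugepagesize(void) {
  FILE* fp = fopen("/proc/meminfo", "r");
  if (fp == NULL) return 0;
  char line[128];
  size_t result = 0;
  while (fgets(line, sizeof(line), fp) != NULL) {
    long kb;
    if (sscanf(line, "Hugepagesize: %ld kB", &kb) == 1) {
      result = (size_t)kb * 1024;
      break;
    }
  }
  fclose(fp);
  return result;
}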
@@ -3293,24 +3265,73 @@ void os::large_page_init() {
     }
     fclose(fp);
   }
 
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+            proper_unit_for_byte_size(large_page_size));
   }
 
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+  return large_page_size;
+}
 
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
 
-  if (UseHugeTLBFS)
-    UseSHM = false;
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
 
-  UseLargePages = UseHugeTLBFS || UseSHM;
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
+    UseSHM = false;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages = Linux::setup_large_page_type(large_page_size);
 
   set_coredump_filter();
 }
@@ -3319,16 +3340,22 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   key_t key = IPC_PRIVATE;
   char *addr;
 
   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
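The body of reserve_memory_special_shm continues with the actual shmget/shmat calls beyond this hunk. For orientation, a bare-bones sketch of a SysV shared-memory segment backed by huge pages; the fallback SHM_HUGETLB define mirrors the one above, and the 2 MB size is an assumption:

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000   // same fallback value the VM code defines
#endif

int main() {
  const size_t bytes = 2 * 1024 * 1024;              // one assumed 2 MB huge page
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
  if (shmid == -1) { perror("shmget(SHM_HUGETLB)"); return 1; }

  void* addr = shmat(shmid, NULL, 0);                // attach anywhere
  // Mark the segment for deletion now; it disappears once the last user detaches.
  shmctl(shmid, IPC_RMID, NULL);
  if (addr == (void*)-1) { perror("shmat"); return 1; }

  ((char*)addr)[0] = 1;                              // touch the first page
  shmdt(addr);                                       // detaching also deletes it
  return 0;
}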
@@ -3376,42 +3403,219 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
     return NULL;
   }
 
-  if ((addr != NULL) && UseNUMAInterleaving) {
+  return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
+
+  return addr;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
       numa_make_global(addr, bytes);
     }
 
     // The memory is committed
     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
 
   return addr;
 }
 
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
   }
 
+  return res;
 }
 
 size_t os::large_page_size() {
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }
 
 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
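The mixed reservation above is the interesting case: reserve the whole range with small pages to satisfy the alignment, then re-map the large-page-aligned middle with MAP_FIXED|MAP_HUGETLB and keep small pages at both ends. A condensed, hedged sketch of that trick outside the VM (2 MB huge page size assumed; error handling simplified):

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  const size_t lp = 2 * 1024 * 1024;                 // assumed huge page size
  const size_t bytes = 3 * lp;
  char* start = (char*)mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == (char*)MAP_FAILED) { perror("mmap"); return 1; }

  // Large-page-aligned sub-range inside the small-page reservation.
  char* lp_start = (char*)(((uintptr_t)start + lp - 1) & ~(uintptr_t)(lp - 1));
  char* lp_end   = (char*)((uintptr_t)(start + bytes) & ~(uintptr_t)(lp - 1));
  if (lp_end > lp_start) {
    void* r = mmap(lp_start, lp_end - lp_start, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_HUGETLB, -1, 0);
    if (r == MAP_FAILED) {
      // Per the VM comment above, a failed fixed mapping may leave a hole here.
      perror("mmap(MAP_FIXED|MAP_HUGETLB)");
    }
  }
  munmap(start, bytes);   // munmap tolerates holes in the range
  return 0;
}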
@@ -4563,21 +4767,23 @@ jint os::init_2(void)
       UseNUMA = false;
     }
   }
-  // With SHM large pages we cannot uncommit a page, so there's not way
+  // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
   // we can make the adaptive lgrp chunk resizing work. If the user specified
-  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+  // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
   // disable adaptive resizing.
-  if (UseNUMA && UseLargePages && UseSHM) {
-    if (!FLAG_IS_DEFAULT(UseNUMA)) {
-      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+  if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+    if (FLAG_IS_DEFAULT(UseNUMA)) {
+      UseNUMA = false;
+    } else {
+      if (FLAG_IS_DEFAULT(UseLargePages) &&
+          FLAG_IS_DEFAULT(UseSHM) &&
+          FLAG_IS_DEFAULT(UseHugeTLBFS)) {
         UseLargePages = false;
       } else {
-        warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+        warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
         UseAdaptiveSizePolicy = false;
         UseAdaptiveNUMAChunkSizing = false;
       }
-    } else {
-      UseNUMA = false;
     }
   }
   if (!UseNUMA && ForceNUMA) {
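For reference, numa_make_global(), which reserve_memory_special calls above when UseNUMAInterleaving is on, interleaves a range across NUMA nodes. HotSpot resolves the libnuma entry points dynamically at runtime; the directly linked sketch below assumes the libnuma 2.x API and -lnuma at link time:

#include <numa.h>
#include <sys/mman.h>
#include <stdio.h>

int main() {
  if (numa_available() == -1) {
    fprintf(stderr, "libnuma reports NUMA is not available\n");
    return 1;
  }
  const size_t bytes = 4 * 1024 * 1024;
  void* p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }

  // Roughly what numa_make_global() amounts to: interleave across all nodes.
  numa_interleave_memory(p, bytes, numa_all_nodes_ptr);

  munmap(p, bytes);
  return 0;
}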
@@ -5848,3 +6054,149 @@ void MemNotifyThread::start() {
 }
 
 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
@@ -32,6 +32,7 @@ typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);
 
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;
 
   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@ class Linux {
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
 
+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
 
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
@@ -3385,7 +3385,7 @@ bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -6601,3 +6601,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
 
   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -3156,7 +3156,12 @@ bool os::can_execute_large_page_memory() {
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }
 
   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5638,3 +5643,9 @@ BOOL os::Advapi32Dll::AdvapiAvailable() {
 }
 
 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -2006,10 +2006,12 @@ jint G1CollectedHeap::initialize() {
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2026,12 +2028,8 @@ jint G1CollectedHeap::initialize() {
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  // Since max_byte_size is aligned to the size of a heap region (checked
-  // above).
-  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-                                                 HeapRegion::GrainBytes);
+                                                 heap_alignment);
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap. (I've actually seen this
@@ -313,7 +313,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
@@ -193,6 +193,8 @@ size_t GenCollectorPolicy::compute_max_alignment() {
     alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
 
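compute_max_alignment folds the large page size into the alignment through lcm; since both operands are powers of two in practice, the result is simply the larger value. A small illustrative sketch (the helper names are not the VM's):

#include <cstddef>

static size_t gcd_size(size_t a, size_t b) {
  while (b != 0) { size_t t = a % b; a = b; b = t; }
  return a;
}

// Least common multiple, e.g. of the card-table constraint and the large page size.
static size_t lcm_size(size_t a, size_t b) {
  return (a / gcd_size(a, b)) * b;
}

// For powers of two this degenerates to the maximum:
// lcm_size(512 * 1024, 2 * 1024 * 1024) == 2 * 1024 * 1024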
@@ -95,13 +95,13 @@ jint GenCollectedHeap::initialize() {
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
@@ -109,9 +109,11 @@ jint GenCollectedHeap::initialize() {
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, &total_reserved,
+  size_t heap_alignment = collector_policy()->max_alignment();
+
+  heap_address = allocate(heap_alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@ char* GenCollectedHeap::allocate(size_t alignment,
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
 
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
 
-  if (UseLargePages) {
-    assert(total_reserved != 0, "total_reserved cannot be 0");
-    total_reserved = round_to(total_reserved, os::large_page_size());
-    if (total_reserved < os::large_page_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-  }
-
   *_total_reserved = total_reserved;
   *_n_covered_regions = n_covered_regions;
 
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
@@ -345,7 +345,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
 };
 
 // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
@@ -681,17 +681,23 @@ static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;
 
     // If the total size is small enough to allow UnscaledNarrowOop then
     // just use UnscaledNarrowOop.
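The new asserts and the HeapBaseMinAddress rounding rely on power-of-two alignment arithmetic. A sketch of what align_size_up and is_size_aligned boil down to, assuming the alignment is a power of two as the asserts require:

#include <cstddef>

static inline size_t align_size_up_pow2(size_t size, size_t alignment) {
  // alignment must be a power of two
  return (size + alignment - 1) & ~(alignment - 1);
}

static inline bool is_size_aligned_pow2(size_t size, size_t alignment) {
  return (size & (alignment - 1)) == 0;
}

// Example: a 1 GB HeapBaseMinAddress is already a multiple of a 2 MB large-page
// alignment and stays unchanged, while 1 GB + 4 KB rounds up to 1 GB + 2 MB.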
@@ -742,6 +748,8 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
     }
   }
 #endif
 
+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
+
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
 
@@ -867,27 +875,33 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  assert(!UseLargePages
+      || UseParallelOldGC
+      || use_large_pages, "Wrong alignment to use large pages");
+
+  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !total_rs.is_reserved()) {
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserver heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 
       ReservedHeapSpace total_rs0(total_reserved, alignment,
-                                  UseLargePages, addr);
+                                  use_large_pages, addr);
 
       if (addr != NULL && !total_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
 
         ReservedHeapSpace total_rs1(total_reserved, alignment,
-                                    UseLargePages, addr);
+                                    use_large_pages, addr);
         total_rs = total_rs1;
       } else {
         total_rs = total_rs0;
@@ -346,7 +346,7 @@ class Universe: AllStatic {
   };
   static NARROW_OOP_MODE narrow_oop_mode();
   static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
-  static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
+  static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
   static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
   static address narrow_oop_base() { return _narrow_oop._base; }
   static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
@@ -5045,9 +5045,15 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
     tty->print_cr("Running test: " #unit_test_function_call); \
     unit_test_function_call
 
+// Forward declaration
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestReservedSpace_test());
+    run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
@@ -1933,6 +1933,9 @@ class CommandLineFlags {
   notproduct(bool, ExecuteInternalVMTests, false,                        \
           "Enable execution of internal VM tests.")                      \
                                                                          \
+  notproduct(bool, VerboseInternalVMTests, false,                        \
+          "Turn on logging for internal VM tests.")                      \
+                                                                         \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")        \
                                                                          \
   product_pd(bool, ResizeTLAB,                                           \
@@ -328,8 +328,8 @@ class os: AllStatic {
 
   static char* non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char* reserve_memory_special(size_t size, char* addr = NULL,
-        bool executable = false);
+  static char* reserve_memory_special(size_t size, size_t alignment,
+        char* addr, bool executable);
   static bool release_memory_special(char* addr, size_t bytes);
   static void large_page_init();
   static size_t large_page_size();
@@ -42,8 +42,19 @@
 
 
 // ReservedSpace
 
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+    _alignment(0), _special(false), _executable(false) {
+}
+
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL, 0, false);
+  size_t page_size = os::page_size_for_region(size, size, 1);
+  bool large_pages = page_size != (size_t)os::vm_page_size();
+  // Don't force the alignment to be large page aligned,
+  // since that will waste memory.
+  size_t alignment = os::vm_allocation_granularity();
+  initialize(size, alignment, large_pages, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
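The single-argument ReservedSpace constructor now asks os::page_size_for_region whether the requested size justifies large pages, while keeping the allocation granularity as the alignment so nothing is wasted on rounding. The helper below is an illustrative stand-in for that decision, not the VM's implementation:

#include <cstddef>

// Illustrative stand-in: pick the large page size only when the region is an
// exact multiple of it, otherwise fall back to the small page size.
static size_t pick_page_size(size_t region_size, size_t small_page, size_t large_page) {
  if (large_page != 0 && region_size >= large_page && region_size % large_page == 0) {
    return large_page;
  }
  return small_page;
}

// pick_page_size(6 * 1024 * 1024, 4 * 1024, 2 * 1024 * 1024) == 2 MB
// pick_page_size(5 * 1024 * 1024, 4 * 1024, 2 * 1024 * 1024) == 4 KB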
@@ -129,16 +140,18 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 
   if (special) {
 
-    base = os::reserve_memory_special(size, requested_address, executable);
+    base = os::reserve_memory_special(size, alignment, requested_address, executable);
 
     if (base != NULL) {
       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
         // OS ignored requested address. Try different address.
         return;
       }
-      // Check alignment constraints
+      // Check alignment constraints.
       assert((uintptr_t) base % alignment == 0,
-             "Large pages returned a non-aligned address");
+             err_msg("Large pages returned a non-aligned address, base: "
+                     PTR_FORMAT " alignment: " PTR_FORMAT,
+                     base, (void*)(uintptr_t)alignment));
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -715,4 +728,188 @@ void VirtualSpace::print() {
   tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
 }
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void release_memory_for_test(ReservedSpace rs) {
+    if (rs.special()) {
+      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+    } else {
+      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+    }
+  }
+
+  static void test_reserved_space1(size_t size, size_t alignment) {
+    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+    ReservedSpace rs(size,          // size
+                     alignment,     // alignment
+                     UseLargePages, // large
+                     NULL,          // requested_address
+                     0);            // noacces_prefix
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space2(size_t size) {
+    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+    ReservedSpace rs(size);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+    test_log("test_reserved_space3(%p, %p, %d)",
+        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+    ReservedSpace rs(size, alignment, large, false);
+
+    test_log(" rs.special() == %d", rs.special());
+
+    assert(rs.base() != NULL, "Must be");
+    assert(rs.size() == size, "Must be");
+
+    if (rs.special()) {
+      small_page_write(rs.base(), size);
+    }
+
+    release_memory_for_test(rs);
+  }
+
+
+  static void test_reserved_space1() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag   = os::vm_allocation_granularity();
+
+    test_reserved_space1(size,      ag);
+    test_reserved_space1(size * 2,  ag);
+    test_reserved_space1(size * 10, ag);
+  }
+
+  static void test_reserved_space2() {
+    size_t size = 2 * 1024 * 1024;
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space2(size * 1);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(ag);
+    test_reserved_space2(size - ag);
+    test_reserved_space2(size);
+    test_reserved_space2(size + ag);
+    test_reserved_space2(size * 2);
+    test_reserved_space2(size * 2 - ag);
+    test_reserved_space2(size * 2 + ag);
+    test_reserved_space2(size * 3);
+    test_reserved_space2(size * 3 - ag);
+    test_reserved_space2(size * 3 + ag);
+    test_reserved_space2(size * 10);
+    test_reserved_space2(size * 10 + size / 2);
+  }
+
+  static void test_reserved_space3() {
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserved_space3(ag,      ag    , false);
+    test_reserved_space3(ag * 2,  ag    , false);
+    test_reserved_space3(ag * 3,  ag    , false);
+    test_reserved_space3(ag * 2,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 2, false);
+    test_reserved_space3(ag * 8,  ag * 2, false);
+    test_reserved_space3(ag * 4,  ag * 4, false);
+    test_reserved_space3(ag * 8,  ag * 4, false);
+    test_reserved_space3(ag * 16, ag * 4, false);
+
+    if (UseLargePages) {
+      size_t lp = os::large_page_size();
+
+      // Without large pages
+      test_reserved_space3(lp,     ag * 4, false);
+      test_reserved_space3(lp * 2, ag * 4, false);
+      test_reserved_space3(lp * 4, ag * 4, false);
+      test_reserved_space3(lp,     lp    , false);
+      test_reserved_space3(lp * 2, lp    , false);
+      test_reserved_space3(lp * 3, lp    , false);
+      test_reserved_space3(lp * 2, lp * 2, false);
+      test_reserved_space3(lp * 4, lp * 2, false);
+      test_reserved_space3(lp * 8, lp * 2, false);
+
+      // With large pages
+      test_reserved_space3(lp,     ag * 4, true);
+      test_reserved_space3(lp * 2, ag * 4, true);
+      test_reserved_space3(lp * 4, ag * 4, true);
+      test_reserved_space3(lp,     lp    , true);
+      test_reserved_space3(lp * 2, lp    , true);
+      test_reserved_space3(lp * 3, lp    , true);
+      test_reserved_space3(lp * 2, lp * 2, true);
+      test_reserved_space3(lp * 4, lp * 2, true);
+      test_reserved_space3(lp * 8, lp * 2, true);
+    }
+  }
+
+  static void test_reserved_space() {
+    test_reserved_space1();
+    test_reserved_space2();
+    test_reserved_space3();
+  }
+};
+
+void TestReservedSpace_test() {
+  TestReservedSpace::test_reserved_space();
+}
+
+#endif // PRODUCT
 
 #endif
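TestReservedSpace_test() is only an entry point; nothing runs it unless the VM's internal test driver calls it in a non-product build (the VerboseInternalVMTests guard in test_log above only controls logging, not whether the tests run). The driver wiring is not part of this hunk; the sketch below shows the pattern with an illustrative driver name, assuming only what the hunk itself declares.

// Illustrative driver-side sketch (names assumed, not the actual HotSpot driver):
// each *_test() entry point is a void() function that asserts internally, so a
// driver simply calls them in sequence in debug builds.
#ifndef PRODUCT
void TestReservedSpace_test();         // defined above in virtualspace.cpp

static void run_internal_vm_tests_sketch() {
  TestReservedSpace_test();            // aborts via assert/guarantee on failure
}
#endif // PRODUCT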
@@ -53,6 +53,7 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
 
  public:
   // Constructor
+  ReservedSpace();
   ReservedSpace(size_t size);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
@@ -87,6 +87,8 @@ class MemTracker : AllStatic {
       MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
    static inline void record_virtual_memory_commit(address addr, size_t size,
       address pc = 0, Thread* thread = NULL) { }
+   static inline void record_virtual_memory_release(address addr, size_t size,
+      Thread* thread = NULL) { }
    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
       Thread* thread = NULL) { }
    static inline Tracker get_realloc_tracker() { return _tkr; }
@@ -372,6 +374,13 @@ class MemTracker : AllStatic {
       tkr.record(addr, size, flags, pc);
     }
   }
 
+  static inline void record_virtual_memory_release(address addr, size_t size,
+                                            Thread* thread = NULL) {
+    if (is_on()) {
+      Tracker tkr(Tracker::Release, thread);
+      tkr.record(addr, size);
+    }
+  }
+
   // record memory type on virtual memory base address
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
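The new record_virtual_memory_release() hook is meant to be invoked where a virtual memory range is actually handed back to the OS, so the native memory tracker's bookkeeping stays in step with the real mapping. The snippet below is an illustrative pairing only, using a raw munmap for self-containment; it is not a copy of the HotSpot release path, which funnels through the os:: layer.

// Illustrative only: pair the unmap with the NMT release record.
#include <sys/mman.h>

bool release_and_record(char* addr, size_t bytes) {
  if (::munmap(addr, bytes) != 0) {
    return false;                       // leave the bookkeeping untouched on failure
  }
  MemTracker::record_virtual_memory_release((address)addr, bytes);
  return true;
}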
@@ -402,6 +402,14 @@ const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000
 
 #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
 
+inline bool is_size_aligned(size_t size, size_t alignment) {
+  return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+  return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
 inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
   return align_size_up_(size, alignment);
 }
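These helpers assume a power-of-two alignment: align_size_up_ adds alignment - 1 and masks off the low bits, so an already aligned value comes back unchanged and is_size_aligned() reduces to a mask test. A small standalone check (not part of the patch) makes the arithmetic concrete.

// Standalone check of the rounding math behind is_size_aligned()/is_ptr_aligned().
#include <assert.h>
#include <stdint.h>

#define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))

int main() {
  // 4 KB pages: 5000 bytes rounds up to the next page boundary, 8192.
  assert(align_size_up_((uint64_t)5000, (uint64_t)4096) == 8192);
  // An already aligned size is returned unchanged, which is exactly the is_size_aligned() test.
  assert(align_size_up_((uint64_t)8192, (uint64_t)4096) == 8192);
  // Same math on an address: round up to the next 2 MB (large page) boundary.
  assert(align_size_up_((uint64_t)0x7f0000001234, (uint64_t)0x200000) == 0x7f0000200000);
  return 0;
}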
@@ -414,6 +422,14 @@ inline intptr_t align_size_down(intptr_t size, intptr_t alignment) {
 
 #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
 
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+  return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+  return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
 // Align objects by rounding up their size, in HeapWord units.
 
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
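align_ptr_up()/align_ptr_down() apply the same rounding to an address instead of a size, which is what the ReservedSpace code and its tests need when producing or checking aligned base addresses. A short standalone illustration follows (plain C++, restating the math locally so it compiles on its own; not patch code).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Restated locally for self-containment; alignment must be a power of two.
static inline void* ptr_align_up(void* p, uintptr_t a)   { return (void*)(((uintptr_t)p + a - 1) & ~(a - 1)); }
static inline void* ptr_align_down(void* p, uintptr_t a) { return (void*)((uintptr_t)p & ~(a - 1)); }

int main() {
  // Over-allocate by the alignment, then use the aligned interior pointer.
  const uintptr_t alignment = 64;                // e.g. a cache-line boundary
  char* raw  = (char*)malloc(256 + alignment);
  void* up   = ptr_align_up(raw, alignment);
  void* down = ptr_align_down(up, alignment);    // already aligned, so unchanged
  printf("raw=%p up=%p down=%p\n", (void*)raw, up, down);
  free(raw);
  return 0;
}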