6716785: implicit null checks not triggering with CompressedOops
Allocate alignment-sized no-access page(s) below the java heap so that memory accesses within the first page above heap_base raise a signal and cause an implicit null check.

Reviewed-by: kvn, jmasa, phh, jcoomes
commit 20dba03e99
parent 575988272e
Changed paths:
  hotspot/src
    os
    os_cpu
      linux_sparc/vm
      linux_x86/vm
      solaris_sparc/vm
      solaris_x86/vm
      windows_x86/vm
    share/vm
      asm
      gc_implementation/parallelScavenge
      memory
      prims
      runtime
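The commit message above describes the mechanism: the Java heap reservation gains a no-access prefix, so a field load through a null compressed oop (which decodes to heap_base plus a small field offset) hits the protected page, raises SIGSEGV, and is turned into an implicit NullPointerException by the signal handler instead of requiring an explicit null check. Below is a minimal sketch of that idea in plain POSIX terms; it is not part of the patch, and the function name and sizes are illustrative assumptions.

  // Sketch only: reserve one protected page directly below the usable heap.
  #include <sys/mman.h>
  #include <stddef.h>

  static char* reserve_heap_with_noaccess_prefix(size_t heap_size, size_t page_size) {
    // Reserve prefix + heap in a single mapping.
    char* base = (char*) mmap(NULL, page_size + heap_size,
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) return NULL;
    // Make the prefix inaccessible: any access in base + [0, page_size) now
    // raises SIGSEGV, which is what the implicit null check machinery relies on.
    if (mprotect(base, page_size, PROT_NONE) != 0) return NULL;
    return base + page_size;  // first usable heap address
  }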
@@ -2414,8 +2414,20 @@ static bool linux_mprotect(char* addr, size_t size, int prot) {
  return ::mprotect(bottom, size, prot) == 0;
}

bool os::protect_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_READ);
// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return linux_mprotect(addr, bytes, p);
}

bool os::guard_memory(char* addr, size_t size) {
@@ -3704,8 +3716,9 @@ void os::make_polling_page_unreadable(void) {

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( !protect_memory((char *)_polling_page, Linux::page_size()) )
  if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
    fatal("Could not enable polling page");
  }
};

int os::active_processor_count() {
@@ -2965,10 +2965,21 @@ static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  return retVal == 0;
}

// Protect memory (make it read-only. (Used to pass readonly pages through
// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
bool os::protect_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ);
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
@@ -2170,6 +2170,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
      // Windows 98 reports faulting addresses incorrectly
      if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
          !os::win32::is_nt()) {

        return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
      }
@@ -2563,9 +2564,33 @@ bool os::release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::protect_memory(char* addr, size_t bytes) {
// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW: p = PAGE_READWRITE; break;
  case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed && !commit_memory(addr, bytes)) {
    fatal("cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
@@ -27,12 +27,6 @@

#include <asm-sparc/traps.h>

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Since the linux kernel resides at the low end of
  // user address space, no null pointer check is needed.
  return offset < 0 || offset >= 0x100000;
}

void MacroAssembler::read_ccr_trap(Register ccr_save) {
  // No implementation
  breakpoint_trap();
@@ -39,10 +39,3 @@ void MacroAssembler::get_thread(Register thread) {

  movptr(thread, tls);
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Linux kernel guarantees that the first page is always unmapped. Don't
  // assume anything more than that.
  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
  return !offset_in_first_page;
}
@@ -65,22 +65,3 @@ void MacroAssembler::get_thread(Register thread) {
    popq(rax);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
  if (UseCompressedOops) {
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
    if ((uintptr_t)offset >= heap_base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
    }
  }
  // Linux kernel guarantees that the first page is always unmapped. Don't
  // assume anything more than that.
  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
  return !offset_in_first_page;
}
@@ -28,18 +28,6 @@
#include <sys/trap.h> // For trap numbers
#include <v9/sys/psr_compat.h> // For V8 compatibility

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // The first page of virtual addresses is unmapped on SPARC.
  // Thus, any access the VM makes through a null pointer with an offset of
  // less than 4K will get a recognizable SIGSEGV, which the signal handler
  // will transform into a NullPointerException.
  // (Actually, the first 64K or so is unmapped, but it's simpler
  // to depend only on the first 4K or so.)

  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
  return !offset_in_first_page;
}

void MacroAssembler::read_ccr_trap(Register ccr_save) {
  // Execute a trap to get the PSR, mask and shift
  // to get the condition codes.
@@ -79,9 +79,3 @@ void MacroAssembler::get_thread(Register thread) {
  if (thread != rax) popl(rax);
  popl(thread);
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Identical to Sparc/Solaris code
  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
  return !offset_in_first_page;
}
@@ -85,22 +85,3 @@ void MacroAssembler::get_thread(Register thread) {
    popq(rax);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Identical to Sparc/Solaris code

  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
  if (UseCompressedOops) {
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
    if ((uintptr_t)offset >= heap_base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
    }
  }
  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
  return !offset_in_first_page;
}
@@ -58,7 +58,3 @@ void MacroAssembler::get_thread(Register thread) {
         "Thread Pointer Offset has not been initialized");
  movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  return offset < 0 || (int)os::vm_page_size() <= offset;
}
@@ -65,19 +65,3 @@ void MacroAssembler::get_thread(Register thread) {
    popq(rax);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
  if (UseCompressedOops) {
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
    if ((uintptr_t)offset >= heap_base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
    }
  }
  return offset < 0 || os::vm_page_size() <= offset;
}
@@ -246,6 +246,24 @@ void AbstractAssembler::block_comment(const char* comment) {
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifndef SPARC
  // Sparc does not have based addressing
  if (UseCompressedOops) {
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
    if ((uintptr_t)offset >= heap_base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
    }
  }
#endif // SPARC
  return offset < 0 || os::vm_page_size() <= offset;
}

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
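The shared needs_explicit_null_check() added above is where the CompressedOops case is handled: a faulting address at or above heap_base is first normalized back to a small raw offset, and only then tested against the first (protected) page. A standalone illustration of that arithmetic follows; heap_base and the page size here are example values, not taken from the patch or from a real VM.

  #include <stdint.h>
  #include <stdbool.h>

  static bool needs_explicit_null_check_demo(intptr_t offset) {
    const uintptr_t heap_base = 0x700000000ULL;  // example narrow-oop heap base
    const intptr_t  page_size = 4096;            // example os::vm_page_size()
    if ((uintptr_t)offset >= heap_base) {
      // e.g. a fault at heap_base + 0x10 (field load through a null narrow oop)
      // normalizes back to the raw field offset 0x10.
      offset = (intptr_t)((uintptr_t)offset - heap_base);
    }
    // Offsets that land in the first (protected) page can rely on the SIGSEGV
    // handler, so only the remaining offsets need an explicit null check.
    return offset < 0 || page_size <= offset;
  }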
@@ -61,6 +61,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)
  if (_virtual_space != NULL) {
    delete _virtual_space;
    _virtual_space = NULL;
    // Release memory reserved in the space.
    rs.release();
  }
  return false;
}
@@ -108,8 +108,8 @@ jint ParallelScavengeHeap::initialize() {
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.
  ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                        og_align);
  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align);
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
@@ -422,6 +422,8 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
@@ -71,13 +71,8 @@ bool PSVirtualSpace::contains(void* p) const {

void PSVirtualSpace::release() {
  DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
  if (reserved_low_addr() != NULL) {
    if (special()) {
      os::release_memory_special(reserved_low_addr(), reserved_size());
    } else {
      (void)os::release_memory(reserved_low_addr(), reserved_size());
    }
  }
  // This may not release memory it didn't reserve.
  // Use rs.release() to release the underlying memory instead.
  _reserved_low_addr = _reserved_high_addr = NULL;
  _committed_low_addr = _committed_high_addr = NULL;
  _special = false;
@@ -222,8 +222,8 @@ char* GenCollectedHeap::allocate(size_t alignment,

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedSpace(total_reserved, alignment,
                           UseLargePages, heap_address);
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}
@@ -2173,8 +2173,7 @@ static char* get_bad_address() {
    size_t size = os::vm_allocation_granularity();
    bad_address = os::reserve_memory(size);
    if (bad_address != NULL) {
      os::commit_memory(bad_address, size);
      os::protect_memory(bad_address, size);
      os::protect_memory(bad_address, size, os::MEM_PROT_READ);
    }
  }
  return bad_address;
@@ -1176,9 +1176,7 @@ void Arguments::set_ergonomics_flags() {
  // by ergonomics.
  if (MaxHeapSize <= max_heap_for_compressed_oops()) {
    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
      // Leave compressed oops off by default. Uncomment
      // the following line to return it to default status.
      // FLAG_SET_ERGO(bool, UseCompressedOops, true);
      FLAG_SET_ERGO(bool, UseCompressedOops, true);
    }
  } else {
    if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
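The Arguments::set_ergonomics_flags() hunk above also changes the ergonomic default: when the maximum heap fits within the compressed-oops limit and the flag was not set on the command line, UseCompressedOops is now switched on automatically. Roughly, on a 64-bit VM built from this change (the heap size and class name below are arbitrary examples):

  java -XX:MaxHeapSize=4g MyApp        # UseCompressedOops enabled ergonomically
  java -XX:-UseCompressedOops MyApp    # an explicit setting is still respected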
@@ -922,8 +922,9 @@ void os::serialize_thread_states() {
  // time and expensive page trap spinning, 'SerializePageLock' is used to block
  // the mutator thread if such case is encountered. See bug 6546278 for details.
  Thread::muxAcquire(&SerializePageLock, "serialize_thread_states");
  os::protect_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
  os::unguard_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
  os::protect_memory((char *)os::get_memory_serialize_page(),
                     os::vm_page_size(), MEM_PROT_READ, /*is_committed*/true );
  os::unguard_memory((char *)os::get_memory_serialize_page(), os::vm_page_size());
  Thread::muxRelease(&SerializePageLock);
}
@@ -193,7 +193,11 @@ class os: AllStatic {
  static bool commit_memory(char* addr, size_t size, size_t alignment_hint);
  static bool uncommit_memory(char* addr, size_t bytes);
  static bool release_memory(char* addr, size_t bytes);
  static bool protect_memory(char* addr, size_t bytes);

  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
  static bool protect_memory(char* addr, size_t bytes, ProtType prot,
                             bool is_committed = false);

  static bool guard_memory(char* addr, size_t bytes);
  static bool unguard_memory(char* addr, size_t bytes);
  static char* map_memory(int fd, const char* file_name, size_t file_offset,
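The os.hpp hunk above replaces the single read-only protect_memory() entry point with one that takes an explicit ProtType (and an is_committed hint that only matters on Windows). Call sites elsewhere in this patch move over along these lines (shapes taken from the jni.cpp, os.cpp and virtualspace.cpp hunks; variable names are abbreviated here):

  // old: the range always became read-only
  os::protect_memory(addr, bytes);
  // new: the desired protection is spelled out; is_committed defaults to false
  os::protect_memory(addr, bytes, os::MEM_PROT_READ);
  os::protect_memory(base, prefix_bytes, os::MEM_PROT_NONE, true /* is_committed */);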
@@ -28,12 +28,15 @@

// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL);
  initialize(size, 0, false, NULL, 0);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large, char* requested_address) {
  initialize(size, alignment, large, requested_address);
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix);
}

char *
@@ -105,7 +108,8 @@ char* ReservedSpace::reserve_and_align(const size_t reserve_size,
ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align)
                             const size_t suffix_align,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
@@ -118,12 +122,16 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
  assert((suffix_align & prefix_align - 1) == 0,
         "suffix_align not divisible by prefix_align");

  // Add in noaccess_prefix to prefix_size;
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(prefix_size + suffix_size, prefix_align, true);
    initialize(size, prefix_align, true, NULL, noaccess_prefix);
    return;
  }

@@ -131,15 +139,19 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Optimistically try to reserve the exact size needed.
  const size_t size = prefix_size + suffix_size;
  char* addr = os::reserve_memory(size, NULL, prefix_align);
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
  if (ofs != 0) {
    // Wrong alignment. Release, allocate more space and do manual alignment.
    //
@@ -153,11 +165,11 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
  }

  const size_t extra = MAX2(ofs, suffix_align - ofs);
  addr = reserve_and_align(size + extra, prefix_size, prefix_align,
  addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                           suffix_size, suffix_align);
  if (addr == NULL) {
    // Try an even larger region. If this fails, address space is exhausted.
    addr = reserve_and_align(size + suffix_align, prefix_size,
    addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                             prefix_align, suffix_size, suffix_align);
  }
}
@@ -165,10 +177,12 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address) {
                               char* requested_address,
                               const size_t noaccess_prefix) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & granularity - 1) == 0,
         "size not aligned to os::vm_allocation_granularity()");
@@ -181,6 +195,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  _size = 0;
  _special = false;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }
@@ -220,7 +235,8 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  // important. If available space is not detected, return NULL.

  if (requested_address != 0) {
    base = os::attempt_reserve_memory_at(size, requested_address);
    base = os::attempt_reserve_memory_at(size,
                                         requested_address-noaccess_prefix);
  } else {
    base = os::reserve_memory(size, NULL, alignment);
  }
@@ -259,6 +275,11 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
@@ -274,6 +295,7 @@ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
}

@@ -320,17 +342,58 @@ size_t ReservedSpace::allocation_align_size_down(size_t size) {

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(_base, _size);
      os::release_memory_special(real_base, real_size);
    } else{
      os::release_memory(_base, _size);
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  // If there is noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}

// VirtualSpace

@@ -348,6 +411,7 @@ VirtualSpace::VirtualSpace() {
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
}

@@ -402,7 +466,8 @@ VirtualSpace::~VirtualSpace() {

void VirtualSpace::release() {
  (void)os::release_memory(low_boundary(), reserved_size());
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
@@ -29,13 +29,15 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
 private:
  char* _base;
  size_t _size;
  size_t _noaccess_prefix;
  size_t _alignment;
  bool _special;

  // ReservedSpace
  ReservedSpace(char* base, size_t size, size_t alignment, bool special);
  void initialize(size_t size, size_t alignment, bool large,
                  char* requested_address = NULL);
                  char* requested_address,
                  const size_t noaccess_prefix);

  // Release parts of an already-reserved memory region [addr, addr + len) to
  // get a new region that has "compound alignment." Return the start of the
@@ -59,13 +61,19 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
                           const size_t suffix_size,
                           const size_t suffix_align);

 protected:
  // Create protection page at the beginning of the space.
  void protect_noaccess_prefix(const size_t size);

 public:
  // Constructor
  ReservedSpace(size_t size);
  ReservedSpace(size_t size, size_t alignment, bool large,
                char* requested_address = NULL);
                char* requested_address = NULL,
                const size_t noaccess_prefix = 0);
  ReservedSpace(const size_t prefix_size, const size_t prefix_align,
                const size_t suffix_size, const size_t suffix_align);
                const size_t suffix_size, const size_t suffix_align,
                const size_t noaccess_prefix);

  // Accessors
  char* base() const { return _base; }
@@ -73,6 +81,8 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
  size_t alignment() const { return _alignment; }
  bool special() const { return _special; }

  size_t noaccess_prefix() const { return _noaccess_prefix; }

  bool is_reserved() const { return _base != NULL; }
  void release();

@@ -104,6 +114,16 @@ ReservedSpace ReservedSpace::last_part(size_t partition_size)
  return last_part(partition_size, alignment());
}

// Class encapsulating behavior specific of memory space reserved for Java heap
class ReservedHeapSpace : public ReservedSpace {
 public:
  // Constructor
  ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                    bool large, char* requested_address);
  ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
                    const size_t suffix_size, const size_t suffix_align);
};

// VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.

class VirtualSpace VALUE_OBJ_CLASS_SPEC {