8261527: Record page size used for underlying mapping in ReservedSpace

Reviewed-by: rkennke, iwalulya
Stefan Johansson 2021-05-04 09:00:10 +00:00
parent 8e071c4b52
commit 141cc2f2a3
20 changed files with 109 additions and 117 deletions
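
In short: every ReservedSpace constructor and helper that took a "bool large" flag now takes an explicit "size_t page_size", and the reservation records the page size actually used for the mapping. A minimal before/after sketch of a call site (illustrative only; size and alignment are placeholders, the real call sites follow below):

// Before: pass a flag, then reconstruct the page size afterwards.
ReservedSpace rs_old(size, alignment, false /* large */, NULL);
size_t ps_old = ReservedSpace::actual_reserved_page_size(rs_old);

// After: pass the desired page size, then read back what was actually used.
ReservedSpace rs_new(size, alignment, os::vm_page_size(), NULL);
size_t ps_new = rs_new.page_size();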


@@ -341,7 +341,7 @@ size_t ArchiveBuilder::estimate_archive_size() {
address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = estimate_archive_size();
- ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), false);
+ ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
vm_direct_exit(0);


@@ -1220,7 +1220,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* bool large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
assert(base_address == NULL ||
(address)archive_space_rs.base() == base_address, "Sanity");
@@ -1269,9 +1269,9 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// via sequential file IO.
address ccs_base = base_address + archive_space_size + gap_size;
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
class_space_rs = ReservedSpace(class_space_size, class_space_alignment,
- false /* large */, (char*)ccs_base);
+ os::vm_page_size(), (char*)ccs_base);
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
@@ -1280,7 +1280,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, archive_space_alignment,
- false /* bool large */, (char*) base_address);
+ os::vm_page_size(), (char*) base_address);
} else {
// Reserve at any address, but leave it up to the platform to choose a good one.
total_space_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);


@@ -336,7 +336,7 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
- ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
+ ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));


@@ -1497,7 +1497,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, preferred_page_size);
- size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
size,
@@ -1589,7 +1589,7 @@ jint G1CollectedHeap::initialize() {
_hot_card_cache = new G1HotCardCache(this);
// Create space mappers.
- size_t page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t page_size = heap_rs.page_size();
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(heap_rs,
heap_rs.size(),
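
Both G1 call sites above show the new pattern: the page size is read back from the reservation instead of being recomputed. A hedged sketch, assuming a successfully reserved rs:

ReservedSpace rs(size, preferred_page_size);
// page_size() returns the value recorded at reserve time; it can be smaller
// than preferred_page_size if a large-page reservation fell back.
size_t page_size = rs.page_size();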


@@ -49,8 +49,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
- const size_t used_page_sz = ReservedSpace::actual_reserved_page_size(rs);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
+ const size_t used_page_sz = rs.page_size();
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, used_page_sz,
rs.base(), rs.size());


@@ -748,7 +748,7 @@ void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
// Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
if(log_is_enabled(Info, pagesize)) {
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
os::trace_page_sizes("Heap",
MinHeapSize,
reserved_heap_size,


@@ -446,7 +446,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
rs.size());


@@ -78,7 +78,7 @@ void CardTable::initialize() {
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
- ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+ ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);


@@ -172,7 +172,7 @@ ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
SIZE_FORMAT, total_reserved, alignment);
ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
- size_t used_page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t used_page_size = heap_rs.page_size();
os::trace_page_sizes("Heap",
MinHeapSize,


@@ -298,7 +298,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
- ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -307,7 +307,7 @@ jint ShenandoahHeap::initialize() {
}
if (_collection_set == NULL) {
- ReservedSpace cset_rs(cset_size, cset_align, false);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
}


@@ -104,7 +104,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = ReservedSpace(reservation_size_request_bytes,
os::vm_allocation_granularity(),
- UseLargePages && os::can_commit_large_page_memory());
+ os::vm_page_size());
if (!_rs.is_reserved()) {
return false;
}


@@ -207,7 +207,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_log2_segment_size = exact_log2(segment_size);
// Reserve and initialize space for _memory.
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t c_size = align_up(committed_size, page_size);
assert(c_size <= rs.size(), "alignment made committed size to large");


@@ -563,7 +563,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
while (a < search_ranges[i].to) {
ReservedSpace rs(size, Metaspace::reserve_alignment(),
- false /*large_pages*/, (char*)a);
+ os::vm_page_size(), (char*)a);
if (rs.is_reserved()) {
assert(a == (address)rs.base(), "Sanity");
return rs;
@@ -579,7 +579,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
return ReservedSpace();
#else
// Default implementation: Just reserve anywhere.
- return ReservedSpace(size, Metaspace::reserve_alignment(), false, (char*)NULL);
+ return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)NULL);
#endif // AARCH64
}
@@ -717,7 +717,7 @@ void Metaspace::global_initialize() {
if (base != NULL) {
if (CompressedKlassPointers::is_valid_base(base)) {
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
- false /* large */, (char*)base);
+ os::vm_page_size(), (char*)base);
}
}


@@ -71,7 +71,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
reserve_limit, Metaspace::reserve_alignment_words());
if (reserve_limit > 0) {
// have reserve limit -> non-expandable context
- _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), false);
+ _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
} else {
// no reserve limit -> expandable vslist


@@ -244,8 +244,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
ReservedSpace rs(word_size * BytesPerWord,
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
- false // large
- );
+ os::vm_page_size());
if (!rs.is_reserved()) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
}


@@ -826,13 +826,17 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
- bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
- assert(!UseLargePages
- || UseParallelGC
- || use_large_pages, "Wrong alignment to use large pages");
+ size_t page_size = os::vm_page_size();
+ if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
+ page_size = os::large_page_size();
+ } else {
+ // Parallel is the only collector that might opt out of using large pages
+ // for the heap.
+ assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
+ }
// Now create the space.
- ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, AllocateHeapAt);
+ ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);
if (total_rs.is_reserved()) {
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
@@ -858,7 +862,7 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
// satisfy compiler
ShouldNotReachHere();
- return ReservedHeapSpace(0, 0, false);
+ return ReservedHeapSpace(0, 0, os::vm_page_size());
}
OopStorage* Universe::vm_weak() {
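
One consequence of recording the page size, sketched below (not code from this patch): because reserve() falls back to os::vm_page_size() when explicit large pages cannot be committed, the recorded value describes the mapping that actually exists, so a caller can detect the fallback:

size_t page_size = os::vm_page_size();
if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
  page_size = os::large_page_size();
}
ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);
if (total_rs.is_reserved() && total_rs.page_size() != page_size) {
  // Explicit large pages were requested but could not be reserved;
  // reserve() recorded the default page size it fell back to.
}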


@@ -50,8 +50,8 @@ ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();
- bool large_pages = page_size != (size_t)os::vm_page_size();
- initialize(size, alignment, large_pages, NULL, false);
+ initialize(size, alignment, page_size, NULL, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
@@ -59,25 +58,25 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
size_t alignment = os::vm_allocation_granularity();;
- bool large_pages = preferred_page_size != (size_t)os::vm_page_size();
- if (large_pages) {
+ if (preferred_page_size != (size_t)os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}
- initialize(size, alignment, large_pages, NULL, false);
+ initialize(size, alignment, preferred_page_size, NULL, false);
}
- ReservedSpace::ReservedSpace(size_t size, size_t alignment,
- bool large,
+ ReservedSpace::ReservedSpace(size_t size,
+ size_t alignment,
+ size_t page_size,
char* requested_address) : _fd_for_heap(-1) {
- initialize(size, alignment, large, requested_address, false);
+ initialize(size, alignment, page_size, requested_address, false);
}
- ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
+ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
bool special, bool executable) : _fd_for_heap(-1) {
assert((size % os::vm_allocation_granularity()) == 0,
"size not allocation aligned");
- initialize_members(base, size, alignment, special, executable);
+ initialize_members(base, size, alignment, page_size, special, executable);
}
// Helper method
@@ -130,8 +129,9 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address)
return true;
}
- static bool use_explicit_large_pages(bool large) {
- return !os::can_commit_large_page_memory() && large;
+ static bool use_explicit_large_pages(size_t page_size) {
+ return !os::can_commit_large_page_memory() &&
+ page_size != (size_t) os::vm_page_size();
}
static bool large_pages_requested() {
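
The predicate keeps its old meaning: with the flag gone, "large" is simply any page size above the default. A hedged equivalence sketch, assuming call sites pass either os::vm_page_size() or a large page size (which is what the call sites in this patch do):

bool large = (page_size != (size_t) os::vm_page_size());
assert(use_explicit_large_pages(page_size) ==
       (!os::can_commit_large_page_memory() && large), "same predicate");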
@@ -192,20 +192,23 @@ static char* reserve_memory_special(char* requested_address, const size_t size,
}
void ReservedSpace::clear_members() {
- initialize_members(NULL, 0, 0, false, false);
+ initialize_members(NULL, 0, 0, 0, false, false);
}
void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
- bool special, bool executable) {
+ size_t page_size, bool special, bool executable) {
_base = base;
_size = size;
_alignment = alignment;
+ _page_size = page_size;
_special = special;
_executable = executable;
_noaccess_prefix = 0;
}
- void ReservedSpace::reserve(size_t size, size_t alignment, bool large,
+ void ReservedSpace::reserve(size_t size,
+ size_t alignment,
+ size_t page_size,
char* requested_address,
bool executable) {
assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
@@ -223,11 +226,11 @@ void ReservedSpace::reserve(size_t size, size_t alignment, bool large,
// So UseLargePages is not taken into account for this reservation.
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
if (base != NULL) {
- initialize_members(base, size, alignment, true, executable);
+ initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
}
// Always return, not possible to fall back to reservation not using a file.
return;
- } else if (use_explicit_large_pages(large)) {
+ } else if (use_explicit_large_pages(page_size)) {
// System can't commit large pages i.e. use transparent huge pages and
// the caller requested large pages. To satisfy this request we use
// explicit large pages and these have to be committed up front to ensure
@@ -236,21 +239,24 @@ void ReservedSpace::reserve(size_t size, size_t alignment, bool large,
char* base = reserve_memory_special(requested_address, size, alignment, executable);
if (base != NULL) {
// Successful reservation using large pages.
- initialize_members(base, size, alignment, true, executable);
+ initialize_members(base, size, alignment, page_size, true, executable);
return;
}
// Failed to reserve explicit large pages, fall back to normal reservation.
+ page_size = os::vm_page_size();
}
// Not a 'special' reservation.
char* base = reserve_memory(requested_address, size, alignment, -1, executable);
if (base != NULL) {
// Successful mapping.
- initialize_members(base, size, alignment, false, executable);
+ initialize_members(base, size, alignment, page_size, false, executable);
}
}
- void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
+ void ReservedSpace::initialize(size_t size,
+ size_t alignment,
+ size_t page_size,
char* requested_address,
bool executable) {
const size_t granularity = os::vm_allocation_granularity();
@@ -260,6 +266,8 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
"alignment not aligned to os::vm_allocation_granularity()");
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
"not a power of 2");
+ assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size");
+ assert(is_power_of_2(page_size), "Invalid page size");
clear_members();
@@ -271,7 +279,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
alignment = MAX2(alignment, (size_t)os::vm_page_size());
// Reserve the memory.
- reserve(size, alignment, large, requested_address, executable);
+ reserve(size, alignment, page_size, requested_address, executable);
// Check that the requested address is used if given.
if (failed_to_reserve_as_requested(_base, requested_address)) {
@@ -283,7 +291,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
- ReservedSpace result(base(), partition_size, alignment, special(), executable());
+ ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
return result;
}
@@ -292,7 +300,7 @@ ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
ReservedSpace result(base() + partition_size, size() - partition_size,
- alignment, special(), executable());
+ alignment, page_size(), special(), executable());
return result;
}
@@ -311,25 +319,6 @@ size_t ReservedSpace::allocation_align_size_up(size_t size) {
return align_up(size, os::vm_allocation_granularity());
}
- size_t ReservedSpace::actual_reserved_page_size(const ReservedSpace& rs) {
- size_t page_size = os::vm_page_size();
- if (UseLargePages) {
- // There are two ways to manage large page memory.
- // 1. OS supports committing large page memory.
- // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
- // And ReservedSpace calls it 'special'. If we failed to set 'special',
- // we reserved memory without large page.
- if (os::can_commit_large_page_memory() || rs.special()) {
- // An alignment at ReservedSpace comes from preferred page size or
- // heap alignment, and if the alignment came from heap alignment, it could be
- // larger than large pages size. So need to cap with the large page size.
- page_size = MIN2(rs.alignment(), os::large_page_size());
- }
- }
- return page_size;
- }
void ReservedSpace::release() {
if (is_reserved()) {
char *real_base = _base - _noaccess_prefix;
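
The removed helper had to infer the page size from UseLargePages, special() and the alignment; with the value recorded at reserve time, the equivalent information is a field read. Migration sketch for a hypothetical remaining caller:

// size_t ps = ReservedSpace::actual_reserved_page_size(rs);  // removed above
size_t ps = rs.page_size();  // recorded when the mapping was created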
@@ -386,7 +375,7 @@ void ReservedHeapSpace::establish_noaccess_prefix() {
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
size_t alignment,
- bool large,
+ size_t page_size,
char* requested_address) {
if (_base != NULL) {
// We tried before, but we didn't like the address delivered.
@@ -399,7 +388,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
p2i(requested_address),
size);
- reserve(size, alignment, large, requested_address, false);
+ reserve(size, alignment, page_size, requested_address, false);
// Check alignment constraints.
if (is_reserved() && !is_aligned(_base, _alignment)) {
@@ -415,7 +404,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
char *upper_bound,
size_t size,
size_t alignment,
- bool large) {
+ size_t page_size) {
const size_t attach_range = highest_start - lowest_start;
// Cap num_attempts at possible number.
// At least one is possible even for 0 sized attach range.
@@ -431,7 +420,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
attach_point <= highest_start && // Avoid wrap around.
((_base == NULL) ||
(_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
- try_reserve_heap(size, alignment, large, attach_point);
+ try_reserve_heap(size, alignment, page_size, attach_point);
attach_point -= stepsize;
}
}
@@ -482,7 +471,7 @@ static char** get_attach_addresses_for_disjoint_mode() {
return (char**) &addresses[start];
}
- void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
+ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
"can not allocate compressed oop heap for this size");
guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
@@ -508,7 +497,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Attempt to alloc at user-given address.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
- try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
+ try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
release();
}
@@ -534,7 +523,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
- aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
+ aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
}
// zerobased: Attempt to allocate in the lower 32G.
@@ -566,7 +555,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
}
lowest_start = align_up(lowest_start, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
- aligned_heap_base_min_address, zerobased_max, size, alignment, large);
+ aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
}
// Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
@@ -582,19 +571,19 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
!CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
char* const attach_point = addresses[i];
assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
- try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
+ try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
i++;
}
// Last, desperate try without any placement.
if (_base == NULL) {
log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
- initialize(size + noaccess_prefix, alignment, large, NULL, false);
+ initialize(size + noaccess_prefix, alignment, page_size, NULL, false);
}
}
}
- ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
+ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {
if (size == 0) {
return;
@@ -609,7 +598,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
// When there is a backing file directory for this space then whether
// large pages are allocated is up to the filesystem of the backing file.
// If requested, let the user know that explicit large pages can't be used.
- if (use_explicit_large_pages(large) && large_pages_requested()) {
+ if (use_explicit_large_pages(page_size) && large_pages_requested()) {
log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
}
}
@@ -618,7 +607,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
guarantee(is_aligned(size, alignment), "set by caller");
if (UseCompressedOops) {
- initialize_compressed_heap(size, alignment, large);
+ initialize_compressed_heap(size, alignment, page_size);
if (_size > size) {
// We allocated heap with noaccess prefix.
// It can happen we get a zerobased/unscaled heap with noaccess prefix,
@@ -626,7 +615,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
establish_noaccess_prefix();
}
} else {
- initialize(size, alignment, large, NULL, false);
+ initialize(size, alignment, page_size, NULL, false);
}
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
@@ -651,8 +640,8 @@ MemRegion ReservedHeapSpace::region() const {
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
- bool large) : ReservedSpace() {
- initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
+ size_t rs_page_size) : ReservedSpace() {
+ initialize(r_size, rs_align, rs_page_size, /*requested address*/ NULL, /*executable*/ true);
MemTracker::record_virtual_memory_type((address)base(), mtCode);
}


@@ -39,14 +39,15 @@ class ReservedSpace {
size_t _size;
size_t _noaccess_prefix;
size_t _alignment;
+ size_t _page_size;
bool _special;
int _fd_for_heap;
private:
bool _executable;
// ReservedSpace
- ReservedSpace(char* base, size_t size, size_t alignment, bool special,
- bool executable);
+ ReservedSpace(char* base, size_t size, size_t alignment,
+ size_t page_size, bool special, bool executable);
protected:
// Helpers to clear and set members during initialization. Two members
// require special treatment:
@@ -57,15 +58,13 @@ class ReservedSpace {
// 0 during initialization.
void clear_members();
void initialize_members(char* base, size_t size, size_t alignment,
- bool special, bool executable);
+ size_t page_size, bool special, bool executable);
- void initialize(size_t size, size_t alignment, bool large,
- char* requested_address,
- bool executable);
+ void initialize(size_t size, size_t alignment, size_t page_size,
+ char* requested_address, bool executable);
- void reserve(size_t size, size_t alignment, bool large,
- char* requested_address,
- bool executable);
+ void reserve(size_t size, size_t alignment, size_t page_size,
+ char* requested_address, bool executable);
public:
// Constructor
ReservedSpace();
@@ -77,7 +76,7 @@ class ReservedSpace {
// the given size is not aligned to that value, as the reservation will be
// aligned up to the final alignment in this case.
ReservedSpace(size_t size, size_t preferred_page_size);
- ReservedSpace(size_t size, size_t alignment, bool large,
+ ReservedSpace(size_t size, size_t alignment, size_t page_size,
char* requested_address = NULL);
// Accessors
@@ -85,6 +84,7 @@ class ReservedSpace {
size_t size() const { return _size; }
char* end() const { return _base + _size; }
size_t alignment() const { return _alignment; }
+ size_t page_size() const { return _page_size; }
bool special() const { return _special; }
bool executable() const { return _executable; }
size_t noaccess_prefix() const { return _noaccess_prefix; }
@@ -107,8 +107,6 @@ class ReservedSpace {
bool contains(const void* p) const {
return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
}
- static size_t actual_reserved_page_size(const ReservedSpace& rs);
};
ReservedSpace
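
Taken together, the updated header yields a reservation that carries its own page size. A minimal usage sketch against this interface (the size and alignment values are placeholders, not from the patch):

ReservedSpace rs(16 * M, os::vm_allocation_granularity(), os::vm_page_size());
if (rs.is_reserved()) {
  // Holds by the new asserts in ReservedSpace::initialize().
  assert(rs.page_size() >= (size_t) os::vm_page_size(), "invariant");
  rs.release();
}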
@@ -125,19 +123,19 @@ ReservedSpace ReservedSpace::last_part(size_t partition_size)
// Class encapsulating behavior specific of memory space reserved for Java heap.
class ReservedHeapSpace : public ReservedSpace {
private:
- void try_reserve_heap(size_t size, size_t alignment, bool large,
+ void try_reserve_heap(size_t size, size_t alignment, size_t page_size,
char *requested_address);
void try_reserve_range(char *highest_start, char *lowest_start,
size_t attach_point_alignment, char *aligned_HBMA,
- char *upper_bound, size_t size, size_t alignment, bool large);
- void initialize_compressed_heap(const size_t size, size_t alignment, bool large);
+ char *upper_bound, size_t size, size_t alignment, size_t page_size);
+ void initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size);
// Create protection page at the beginning of the space.
void establish_noaccess_prefix();
public:
// Constructor. Tries to find a heap that is good for compressed oops.
// heap_allocation_directory is the path to the backing memory for Java heap. When set, Java heap will be allocated
// on the device which is managed by the file system where the directory resides.
- ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* heap_allocation_directory = NULL);
+ ReservedHeapSpace(size_t size, size_t forced_base_alignment, size_t page_size, const char* heap_allocation_directory = NULL);
// Returns the base to be used for compression, i.e. so that null can be
// encoded safely and implicit null checks can work.
char *compressed_oop_base() const { return _base - _noaccess_prefix; }
@@ -148,7 +146,7 @@ class ReservedHeapSpace : public ReservedSpace {
class ReservedCodeSpace : public ReservedSpace {
public:
// Constructor
- ReservedCodeSpace(size_t r_size, size_t rs_align, bool large);
+ ReservedCodeSpace(size_t r_size, size_t rs_align, size_t page_size);
};
// VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.


@@ -251,7 +251,7 @@ WB_END
WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
- ReservedHeapSpace rhs(100 * granularity, granularity, false);
+ ReservedHeapSpace rhs(100 * granularity, granularity, os::vm_page_size());
VirtualSpace vs;
vs.initialize(rhs, 50 * granularity);
@@ -278,7 +278,7 @@ WB_END
static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
size_t magnitude, size_t iterations) {
size_t granularity = os::vm_allocation_granularity();
- ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false);
+ ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, os::vm_page_size());
VirtualSpace vs;
if (!vs.initialize(rhs, 0)) {
tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");


@@ -77,8 +77,8 @@ namespace {
static void test_reserved_size_alignment(size_t size, size_t alignment) {
ASSERT_PRED2(is_size_aligned, size, alignment) << "Incorrect input parameters";
- ReservedSpace rs(size, alignment, UseLargePages, (char *) NULL);
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ ReservedSpace rs(size, alignment, page_size, (char *) NULL);
ASSERT_TRUE(rs.base() != NULL) << "rs.special = " << rs.special();
ASSERT_EQ(size, rs.size()) << "rs.special = " << rs.special();
@@ -104,8 +104,9 @@ namespace {
ASSERT_PRED2(is_size_aligned, size, alignment) << "Must be at least AG aligned";
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+ size_t page_size = large ? os::large_page_size() : os::vm_page_size();
- ReservedSpace rs(size, alignment, large);
+ ReservedSpace rs(size, alignment, page_size);
MemoryReleaser releaser(&rs);
EXPECT_TRUE(rs.base() != NULL) << "rs.special: " << rs.special();
@@ -219,7 +220,7 @@ namespace {
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
- /* large */ false);
+ os::vm_page_size());
}
}
@@ -298,7 +299,7 @@ TEST_VM(VirtualSpace, actual_committed_space_one_large_page) {
size_t large_page_size = os::large_page_size();
- ReservedSpace reserved(large_page_size, large_page_size, true);
+ ReservedSpace reserved(large_page_size, large_page_size, large_page_size);
ReservedSpaceReleaser releaser(&reserved);
ASSERT_TRUE(reserved.is_reserved());
@@ -364,10 +365,10 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space1(size_t size, size_t alignment) {
ASSERT_TRUE(is_aligned(size, alignment)) << "Incorrect input parameters";
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
ReservedSpace rs(size, // size
alignment, // alignment
- UseLargePages, // large
+ page_size, // page size
(char *)NULL); // requested_address
EXPECT_TRUE(rs.base() != NULL);
@@ -409,8 +410,9 @@ class TestReservedSpace : AllStatic {
EXPECT_TRUE(is_aligned(size, alignment)) << "Must be at least aligned against alignment";
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+ size_t page_size = large ? os::large_page_size() : os::vm_page_size();
- ReservedSpace rs(size, alignment, large);
+ ReservedSpace rs(size, alignment, page_size);
EXPECT_TRUE(rs.base() != NULL);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
@@ -519,7 +521,7 @@ class TestVirtualSpace : AllStatic {
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
- /* large */ false);
+ os::vm_page_size());
}
}
@@ -574,7 +576,7 @@ class TestVirtualSpace : AllStatic {
size_t large_page_size = os::large_page_size();
- ReservedSpace reserved(large_page_size, large_page_size, true);
+ ReservedSpace reserved(large_page_size, large_page_size, large_page_size);
EXPECT_TRUE(reserved.is_reserved());
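
Finally, a gtest-style sketch of a check the new accessor makes possible (not part of this patch; TEST_VM and the os:: helpers are the ones already used above):

TEST_VM(ReservedSpace, page_size_is_recorded) {
  // A default-page reservation should record the default page size.
  ReservedSpace rs(os::vm_allocation_granularity(),
                   os::vm_allocation_granularity(),
                   os::vm_page_size());
  ASSERT_TRUE(rs.is_reserved());
  EXPECT_EQ((size_t) os::vm_page_size(), rs.page_size());
  rs.release();
}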