8310743: assert(reserved_rgn != nullptr) failed: Add committed region, No reserved region found

Reviewed-by: stefank, ayang
This commit is contained in:
Axel Boldt-Christmas 2023-07-03 14:06:58 +00:00
parent ba974d5c62
commit f393975d1b
12 changed files with 445 additions and 48 deletions

View File

@ -38,6 +38,9 @@ const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift
// Virtual memory to physical memory ratio
const size_t ZVirtualToPhysicalRatio = 16; // 16:1
// Max virtual memory ranges
const size_t ZMaxVirtualReservations = 100; // Each reservation at least 1% of total
// Page size shifts
const size_t ZPageSizeSmallShift = ZGranuleSizeShift;
extern size_t ZPageSizeMediumShift;

View File

@ -81,6 +81,10 @@ ZMemoryManager::ZMemoryManager()
: _freelist(),
_callbacks() {}
bool ZMemoryManager::free_is_contiguous() const {
return _freelist.size() == 1;
}
// Registers the given callbacks with this memory manager, replacing any
// previously registered set.
void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
  _callbacks = callbacks;
}

View File

@ -81,6 +81,8 @@ private:
public:
ZMemoryManager();
bool free_is_contiguous() const;
void register_callbacks(const Callbacks& callbacks);
zoffset peek_low_address() const;

View File

@ -0,0 +1,121 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zNMT.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "memory/allocation.hpp"
#include "services/memTracker.hpp"
#include "utilities/nativeCallStack.hpp"
ZNMT::Reservation ZNMT::_reservations[ZMaxVirtualReservations] = {};
size_t ZNMT::_num_reservations = 0;
// Maps a physical-memory offset to the reservation that "covers" it when the
// reservations are laid out back to back in registration order. Returns the
// reservation's index and stores the offset relative to that reservation's
// start in *offset_in_reservation.
size_t ZNMT::reservation_index(zoffset offset, size_t* offset_in_reservation) {
  assert(_num_reservations > 0, "at least one reservation must exist");

  // Peel off whole reservations until the remaining offset falls
  // inside the current one.
  size_t remaining = untype(offset);
  size_t i = 0;
  while (i < _num_reservations && remaining >= _reservations[i]._size) {
    remaining -= _reservations[i]._size;
    ++i;
  }

  assert(i != _num_reservations, "failed to find reservation index");

  *offset_in_reservation = remaining;
  return i;
}
// Registers (commit) or unregisters (uncommit) the range
// [offset, offset + size) with NMT, translating the physical offset into
// fake virtual addresses inside the recorded reservations. A single range
// may straddle several reservations, in which case it is processed as
// multiple sub-ranges.
void ZNMT::process_fake_mapping(zoffset offset, size_t size, bool commit) {
  // In order to satisfy NMT's requirement of a 1:1 mapping between committed
  // and reserved addresses, a fake mapping from the offset into the reservation
  // is used.
  //
  // These mappings from
  //  [offset, offset + size) -> {[virtual address range], ...}
  // are stable after the heap has been reserved. No commits precede any
  // reservations. Committing and uncommitting the same [offset, offset + size)
  // range will result in same virtual memory ranges.

  size_t left_to_process = size;
  size_t offset_in_reservation;
  for (size_t i = reservation_index(offset, &offset_in_reservation); i < _num_reservations; ++i) {
    const zaddress_unsafe reservation_start = _reservations[i]._start;
    const size_t reservation_size = _reservations[i]._size;
    // The part of the range that fits in this reservation
    const size_t sub_range_size = MIN2(left_to_process, reservation_size - offset_in_reservation);
    const uintptr_t sub_range_addr = untype(reservation_start) + offset_in_reservation;

    // commit / uncommit memory
    if (commit) {
      MemTracker::record_virtual_memory_commit((void*)sub_range_addr, sub_range_size, CALLER_PC);
    } else {
      if (MemTracker::enabled()) {
        Tracker tracker(Tracker::uncommit);
        tracker.record((address)sub_range_addr, sub_range_size);
      }
    }

    left_to_process -= sub_range_size;
    if (left_to_process == 0) {
      // Processed all nmt registrations
      return;
    }

    // Subsequent reservations are consumed from their start
    offset_in_reservation = 0;
  }

  assert(left_to_process == 0, "everything was not committed");
}
// Records a virtual memory reservation of [start, start + size) and
// registers it with NMT as Java heap memory.
void ZNMT::reserve(zaddress_unsafe start, size_t size) {
  assert(_num_reservations < ZMaxVirtualReservations, "too many reservations");
  // Keep track of the reservations made in order to create fake mappings
  // between the reserved and committed memory.
  // See details in ZNMT::process_fake_mapping
  _reservations[_num_reservations++] = {start, size};

  MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC, mtJavaHeap);
}
// Registers a commit of physical memory [offset, offset + size) with NMT.
void ZNMT::commit(zoffset offset, size_t size) {
  // NMT expects a 1-to-1 mapping between virtual and physical memory.
  // ZGC can temporarily have multiple virtual addresses pointing to
  // the same physical memory.
  //
  // When this function is called we don't know where in the virtual memory
  // this physical memory will be mapped. So we fake the virtual memory
  // address by mapping the physical offset into offsets in the reserved
  // memory space.
  process_fake_mapping(offset, size, true);
}
// Unregisters a commit of physical memory [offset, offset + size) with NMT.
void ZNMT::uncommit(zoffset offset, size_t size) {
  // We fake the virtual memory address by mapping the physical offset
  // into offsets in the reserved memory space.
  // See comment in ZNMT::commit
  process_fake_mapping(offset, size, false);
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZNMT_HPP
#define SHARE_GC_Z_ZNMT_HPP
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/nativeCallStack.hpp"
// Interface between ZGC's memory management and Native Memory Tracking (NMT).
//
// ZGC reserves virtual memory and commits physical memory separately, and a
// physical range can be mapped at multiple virtual addresses. NMT, however,
// requires a 1:1 mapping between reserved and committed addresses. This class
// bridges the two models by recording every heap reservation and translating
// physical-memory offsets into fake addresses inside those reservations when
// reporting commits and uncommits to NMT.
class ZNMT : public AllStatic {
private:
  // One recorded virtual memory reservation
  struct Reservation {
    zaddress_unsafe _start;
    size_t _size;
  };
  // Reservations in registration order; the fake mapping treats them as
  // if laid out back to back.
  static Reservation _reservations[ZMaxVirtualReservations];
  static size_t _num_reservations;

  // Finds the reservation covering the given physical offset
  static size_t reservation_index(zoffset offset, size_t* offset_in_reservation);
  // Translates [offset, offset + size) into per-reservation sub-ranges and
  // records them as committed (commit == true) or uncommitted with NMT
  static void process_fake_mapping(zoffset offset, size_t size, bool commit);

public:
  static void reserve(zaddress_unsafe start, size_t size);
  static void commit(zoffset offset, size_t size);
  static void uncommit(zoffset offset, size_t size);
};
#endif // SHARE_GC_Z_ZNMT_HPP

View File

@ -28,6 +28,7 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNMT.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "logging/log.hpp"
@ -35,7 +36,6 @@
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@ -276,26 +276,6 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
void ZPhysicalMemoryManager::nmt_commit(zoffset offset, size_t size) const {
// NMT expects a 1-to-1 mapping between virtual and physical memory.
// ZGC can temporarily have multiple virtual addresses pointing to
// the same physical memory.
//
// When this function is called we don't know where in the virtual memory
// this physical memory will be mapped. So we fake that the virtual memory
// address is the heap base + the given offset.
const uintptr_t addr = ZAddressHeapBase + untype(offset);
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
void ZPhysicalMemoryManager::nmt_uncommit(zoffset offset, size_t size) const {
if (MemTracker::enabled()) {
const uintptr_t addr = ZAddressHeapBase + untype(offset);
Tracker tracker(Tracker::uncommit);
tracker.record((address)addr, size);
}
}
void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Invalid size");
@ -330,7 +310,7 @@ bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
const size_t committed = _backing.commit(segment.start(), segment.size());
// Register with NMT
nmt_commit(segment.start(), committed);
ZNMT::commit(segment.start(), committed);
// Register committed segment
if (!pmem.commit_segment(i, committed)) {
@ -356,7 +336,7 @@ bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
// Unregister with NMT
nmt_uncommit(segment.start(), uncommitted);
ZNMT::uncommit(segment.start(), uncommitted);
// Deregister uncommitted segment
if (!pmem.uncommit_segment(i, uncommitted)) {

View File

@ -84,9 +84,6 @@ private:
ZPhysicalMemoryBacking _backing;
ZMemoryManager _manager;
void nmt_commit(zoffset offset, size_t size) const;
void nmt_uncommit(zoffset offset, size_t size) const;
void pretouch_view(zaddress addr, size_t size) const;
void map_view(zaddress_unsafe addr, const ZPhysicalMemory& pmem) const;
void unmap_view(zaddress_unsafe addr, size_t size) const;

View File

@ -22,12 +22,13 @@
*/
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zNMT.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@ -54,6 +55,39 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
_initialized = true;
}
#ifdef ASSERT
// Debug-only (ASSERT builds): deliberately reserves the heap's virtual
// address space as multiple discontiguous ranges, driven by the
// ZForceDiscontiguousHeapReservations flag. Returns the number of bytes
// actually reserved, which may be less than the requested size.
size_t ZVirtualMemoryManager::force_reserve_discontiguous(size_t size) {
  const size_t min_range = calculate_min_range(size);
  // Cap each range so roughly ZForceDiscontiguousHeapReservations ranges
  // cover the requested size, but never below the minimum range size.
  const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range);
  size_t reserved = 0;

  // Try to reserve ZForceDiscontiguousHeapReservations number of virtual memory
  // ranges. Starting with higher addresses.
  uintptr_t end = ZAddressOffsetMax;
  while (reserved < size && end >= max_range) {
    const size_t remaining = size - reserved;
    const size_t reserve_size = MIN2(max_range, remaining);
    const uintptr_t reserve_start = end - reserve_size;
    if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) {
      reserved += reserve_size;
    }
    // Skip an extra reserve_size each step to leave holes between the
    // ranges, forcing discontiguity. The loop condition (end >= max_range)
    // guards against unsigned underflow of 'end'.
    end -= reserve_size * 2;
  }

  // If (reserved < size) attempt to reserve the rest via normal divide and conquer
  uintptr_t start = 0;
  while (reserved < size && start < ZAddressOffsetMax) {
    const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
    reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range);
    start += remaining;
  }

  return reserved;
}
#endif
size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, size_t min_range) {
if (size < min_range) {
// Too small
@ -75,15 +109,20 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size,
// Divide and conquer
const size_t first_part = align_down(half, ZGranuleSize);
const size_t second_part = size - first_part;
return reserve_discontiguous(start, first_part, min_range) +
reserve_discontiguous(start + first_part, second_part, min_range);
const size_t first_size = reserve_discontiguous(start, first_part, min_range);
const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range);
return first_size + second_size;
}
size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
size_t ZVirtualMemoryManager::calculate_min_range(size_t size) {
// Don't try to reserve address ranges smaller than 1% of the requested size.
// This avoids an explosion of reservation attempts in case large parts of the
// address space is already occupied.
const size_t min_range = align_up(size / 100, ZGranuleSize);
return align_up(size / ZMaxVirtualReservations, ZGranuleSize);
}
size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
const size_t min_range = calculate_min_range(size);
uintptr_t start = 0;
size_t reserved = 0;
@ -98,7 +137,7 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
}
bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned " SIZE_FORMAT_X, size);
// Reserve address views
const zaddress_unsafe addr = ZOffset::address_unsafe(start);
@ -109,7 +148,7 @@ bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) {
}
// Register address views with native memory tracker
nmt_reserve(addr, size);
ZNMT::reserve(addr, size);
// Make the address range free
_manager.free(start, size);
@ -137,15 +176,25 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap());
const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit);
size_t reserved = size;
bool contiguous = true;
auto do_reserve = [&]() {
#ifdef ASSERT
if (ZForceDiscontiguousHeapReservations > 0) {
return force_reserve_discontiguous(size);
}
#endif
// Prefer a contiguous address space
if (reserve_contiguous(size)) {
return size;
}
// Prefer a contiguous address space
if (!reserve_contiguous(size)) {
// Fall back to a discontiguous address space
reserved = reserve_discontiguous(size);
contiguous = false;
}
return reserve_discontiguous(size);
};
const size_t reserved = do_reserve();
const bool contiguous = _manager.free_is_contiguous();
log_info_p(gc, init)("Address Space Type: %s/%s/%s",
(contiguous ? "Contiguous" : "Discontiguous"),
@ -159,11 +208,6 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
return reserved >= max_capacity;
}
void ZVirtualMemoryManager::nmt_reserve(zaddress_unsafe start, size_t size) {
MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC);
MemTracker::record_virtual_memory_type((void*)untype(start), mtJavaHeap);
}
bool ZVirtualMemoryManager::is_initialized() const {
return _initialized;
}
@ -179,6 +223,10 @@ ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address)
start = _manager.alloc_high_address(size);
}
if (start == zoffset(UINTPTR_MAX)) {
return ZVirtualMemory();
}
return ZVirtualMemory(start, size);
}

View File

@ -24,6 +24,7 @@
#ifndef SHARE_GC_Z_ZVIRTUALMEMORY_HPP
#define SHARE_GC_Z_ZVIRTUALMEMORY_HPP
#include "gc/z/zAddress.hpp"
#include "gc/z/zMemory.hpp"
class ZVirtualMemory {
@ -47,6 +48,8 @@ public:
class ZVirtualMemoryManager {
private:
static size_t calculate_min_range(size_t size);
ZMemoryManager _manager;
size_t _reserved;
bool _initialized;
@ -63,7 +66,7 @@ private:
size_t reserve_discontiguous(size_t size);
bool reserve(size_t max_capacity);
void nmt_reserve(zaddress_unsafe start, size_t size);
DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);)
public:
ZVirtualMemoryManager(size_t max_capacity);

View File

@ -24,7 +24,8 @@
#ifndef SHARE_GC_Z_Z_GLOBALS_HPP
#define SHARE_GC_Z_Z_GLOBALS_HPP
#include "zPageAge.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zPageAge.hpp"
#define GC_Z_FLAGS(develop, \
develop_pd, \
@ -68,7 +69,17 @@
\
product(int, ZTenuringThreshold, -1, DIAGNOSTIC, \
"Young generation tenuring threshold, -1 for dynamic computation")\
range(-1, static_cast<int>(ZPageAgeMax))
range(-1, static_cast<int>(ZPageAgeMax)) \
\
develop(size_t, ZForceDiscontiguousHeapReservations, 0, \
"The gc will attempt to split the heap reservation into this " \
"many reservations, subject to available virtual address space " \
"and invariant restrictions. Higher virtual addresses are " \
"preferred " \
"0: Disabled " \
"1: Attempt contiguous reservation starting at a higher address " \
"N: Force that many reservations, if possible") \
range(0, ZMaxVirtualReservations)
// end of GC_Z_FLAGS

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package gc.z;
/**
* @test TestZForceDiscontiguousHeapReservations
* @requires vm.gc.ZGenerational & vm.debug
* @summary Test the ZForceDiscontiguousHeapReservations development flag
* @library /test/lib
* @run driver gc.z.TestZForceDiscontiguousHeapReservations
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
public class TestZForceDiscontiguousHeapReservations {

    /**
     * Launches a JVM with -XX:ZForceDiscontiguousHeapReservations=n and
     * verifies that it starts up cleanly; for n > 1 it also verifies that
     * the heap address space really ended up discontiguous.
     *
     * Xmx is chosen to be divisible by 'ZForceDiscontiguousHeapReservations
     * * ZGranuleSize'. Xms is chosen to be less than
     * '16 * Xmx / ZForceDiscontiguousHeapReservations' because ZGC cannot
     * currently handle a discontiguous heap whose initial size exceeds the
     * individual reservations.
     */
    private static void testValue(int n) throws Exception {
        final int xmxInM = 2000;
        final int xmsInM = Math.min(16 * xmxInM / (n + 1), xmxInM);
        final OutputAnalyzer output = ProcessTools.executeProcess(ProcessTools.createTestJvm(
                "-XX:+UseZGC",
                "-XX:+ZGenerational",
                "-Xms" + xmsInM + "M",
                "-Xmx" + xmxInM + "M",
                "-Xlog:gc,gc+init",
                "-XX:ZForceDiscontiguousHeapReservations=" + n,
                "-version"));
        output.outputTo(System.out);
        output.errorTo(System.out);
        output.shouldHaveExitValue(0);

        if (n > 1) {
            output.shouldContain("Address Space Type: Discontiguous");
        }
    }

    public static void main(String[] args) throws Exception {
        testValue(0);
        testValue(1);
        testValue(2);
        testValue(100);
    }
}

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package gc.z;
/**
* @test TestZNMT
* @bug 8310743
* @requires vm.gc.ZGenerational & vm.debug
* @summary Test NMT and ZGenerational heap reservation / commits interactions.
* @library / /test/lib
* @run driver gc.z.TestZNMT
*/
import static gc.testlibrary.Allocation.blackHole;
import java.util.ArrayList;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
public class TestZNMT {
    // Heap max size used by every spawned test JVM, in MiB
    private static final int XmxInM = 2000;

    // Child program run in the spawned JVM: allocates enough memory to make
    // ZGC commit heap in several of the forced discontiguous reservations.
    static class Test {
        private static final int K = 1024;
        private static final int M = K * K;
        public static void main(String[] args) throws Exception {
            // args: [0] = ZForceDiscontiguousHeapReservations value,
            //       [1] = Xms in MiB (both forwarded by testValue below)
            final int zForceDiscontiguousHeapReservations = Integer.parseInt(args[0]);
            final int XmsInM = Integer.parseInt(args[1]);
            // 75% of the largest allocation that fits within one reservation
            // (or Xmx / zForceDiscontiguousHeapReservations), whichever is smallest
            final int allocationInM = (int)(Math.min(zForceDiscontiguousHeapReservations == 0
                                                        ? XmxInM
                                                        : XmxInM / zForceDiscontiguousHeapReservations,
                                                     XmsInM) * 0.75);
            ArrayList<byte[]> list = new ArrayList<>(zForceDiscontiguousHeapReservations);
            for (int i = 0; i < zForceDiscontiguousHeapReservations; i++) {
                list.add(new byte[allocationInM * M]);
            }
            // Keep the allocations alive until here so they are really committed
            blackHole(list);
        }
    }

    // Launches a JVM with NMT enabled and the given number of forced
    // discontiguous heap reservations, then checks the NMT output.
    private static void testValue(int zForceDiscontiguousHeapReservations) throws Exception {
        /**
         * Xmx is picked so that it is divisible by 'ZForceDiscontiguousHeapReservations * ZGranuleSize'
         * Xms is picked so that it is less than '16 * Xmx / ZForceDiscontiguousHeapReservations' as ZGC
         * cannot currently handle a discontiguous heap with an initial size larger than the individual
         * reservations.
         */
        final int XmsInM = Math.min(16 * XmxInM / (zForceDiscontiguousHeapReservations + 1), XmxInM);
        OutputAnalyzer oa = ProcessTools.executeProcess(ProcessTools.createTestJvm(
                "-XX:+UseZGC",
                "-XX:+ZGenerational",
                "-Xms" + XmsInM + "M",
                "-Xmx" + XmxInM + "M",
                "-Xlog:gc,gc+init",
                "-XX:ZForceDiscontiguousHeapReservations=" + zForceDiscontiguousHeapReservations,
                "-XX:NativeMemoryTracking=detail",
                "-XX:+PrintNMTStatistics",
                Test.class.getName(),
                Integer.toString(zForceDiscontiguousHeapReservations),
                Integer.toString(XmxInM)))
                .outputTo(System.out)
                .errorTo(System.out)
                .shouldHaveExitValue(0);
        if (zForceDiscontiguousHeapReservations > 1) {
            oa.shouldContain("Address Space Type: Discontiguous");
        }
        if (XmsInM < XmxInM) {
            // There will be reservations which are smaller than the total
            // memory allocated in TestZNMT.Test.main. This means that some
            // reservation will be completely committed and print the following
            // in the NMT statistics.
            oa.shouldMatch("reserved and committed \\d+ for Java Heap");
        }
    }

    public static void main(String[] args) throws Exception {
        testValue(0);
        testValue(1);
        testValue(2);
        testValue(100);
    }
}