8252973: ZGC: Implement Large Pages support on Windows

Reviewed-by: eosterlund, mbeckwit, pliden
Stefan Karlsson 2020-11-23 08:39:23 +00:00
parent e4a32bea9f
commit 69c3470e72
11 changed files with 472 additions and 168 deletions

src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp

@@ -29,7 +29,11 @@
#include <sys/mman.h>
#include <sys/types.h>

-void ZVirtualMemoryManager::pd_initialize() {
+void ZVirtualMemoryManager::pd_initialize_before_reserve() {
+  // Does nothing
+}
+
+void ZVirtualMemoryManager::pd_initialize_after_reserve() {
  // Does nothing
}

src/hotspot/os/windows/gc/z/zLargePages_windows.cpp

@@ -22,8 +22,19 @@
 */

#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zLargePages.hpp"
+#include "gc/z/zSyscall_windows.hpp"
+#include "runtime/globals.hpp"

void ZLargePages::pd_initialize() {
+  if (UseLargePages) {
+    if (ZSyscall::is_large_pages_supported()) {
+      _state = Explicit;
+      return;
+    }
+    log_info_p(gc, init)("Shared large pages not supported on this OS version");
+  }
+
  _state = Disabled;
}
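A note on privileges: the Explicit state above only helps if the process token actually holds SeLockMemoryPrivilege ("Lock pages in memory"); the AWE allocation calls used later fail without it. Below is a minimal sketch of enabling that privilege with documented Win32 calls. The helper name is hypothetical and this code is not part of the commit:

#include <windows.h>

// Hypothetical helper, not part of this change. Enabling the privilege
// only succeeds if an administrator granted "Lock pages in memory" to
// the account; otherwise AdjustTokenPrivileges succeeds but reports
// ERROR_NOT_ALL_ASSIGNED via GetLastError().
static bool enable_lock_memory_privilege() {
  HANDLE token;
  if (!OpenProcessToken(GetCurrentProcess(),
                        TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) {
    return false;
  }

  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

  if (!LookupPrivilegeValueW(NULL, L"SeLockMemoryPrivilege", &tp.Privileges[0].Luid)) {
    CloseHandle(token);
    return false;
  }

  const BOOL adjusted = AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL);
  const bool ok = adjusted && GetLastError() == ERROR_SUCCESS;
  CloseHandle(token);
  return ok;
}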

src/hotspot/os/windows/gc/z/zMapper_windows.cpp

@@ -199,6 +199,62 @@ void ZMapper::close_paging_file_mapping(HANDLE file_handle) {
  }
}

+HANDLE ZMapper::create_shared_awe_section() {
+  MEM_EXTENDED_PARAMETER parameter = { 0 };
+  parameter.Type = MemSectionExtendedParameterUserPhysicalFlags;
+  parameter.ULong64 = 0;
+
+  HANDLE section = ZSyscall::CreateFileMapping2(
+    INVALID_HANDLE_VALUE,                 // File
+    NULL,                                 // SecurityAttributes
+    SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess
+    PAGE_READWRITE,                       // PageProtection
+    SEC_RESERVE | SEC_LARGE_PAGES,        // AllocationAttributes
+    0,                                    // MaximumSize
+    NULL,                                 // Name
+    &parameter,                           // ExtendedParameters
+    1                                     // ParameterCount
+    );
+
+  if (section == NULL) {
+    fatal("Could not create shared AWE section (%d)", GetLastError());
+  }
+
+  return section;
+}
+
+uintptr_t ZMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) {
+  MEM_EXTENDED_PARAMETER parameter = { 0 };
+  parameter.Type = MemExtendedParameterUserPhysicalHandle;
+  parameter.Handle = awe_section;
+
+  void* const res = ZSyscall::VirtualAlloc2(
+    GetCurrentProcess(),        // Process
+    (void*)addr,                // BaseAddress
+    size,                       // Size
+    MEM_RESERVE | MEM_PHYSICAL, // AllocationType
+    PAGE_READWRITE,             // PageProtection
+    &parameter,                 // ExtendedParameters
+    1                           // ParameterCount
+    );
+
+  // Caller responsible for error handling
+  return (uintptr_t)res;
+}
+
+void ZMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) {
+  bool res = VirtualFree(
+    (void*)addr, // lpAddress
+    0,           // dwSize
+    MEM_RELEASE  // dwFreeType
+    );
+
+  if (!res) {
+    fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
+          addr, size / M, GetLastError());
+  }
+}
+
void ZMapper::split_placeholder(uintptr_t addr, size_t size) {
  const bool res = VirtualFree(
    (void*)addr, // lpAddress
src/hotspot/os/windows/gc/z/zMapper_windows.hpp

@@ -59,6 +59,15 @@ public:
  // Close paging file mapping
  static void close_paging_file_mapping(HANDLE file_handle);

+  // Create a shared AWE section
+  static HANDLE create_shared_awe_section();
+
+  // Reserve memory attached to the shared AWE section
+  static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size);
+
+  // Unreserve memory attached to a shared AWE section
+  static void unreserve_for_shared_awe(uintptr_t addr, size_t size);
+
  // Split a placeholder
  //
  // A view can only replace an entire placeholder, so placeholders need to be

src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp

@@ -24,19 +24,195 @@
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zPhysicalMemoryBacking_windows.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
class ZPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
public:
virtual size_t commit(size_t offset, size_t size) = 0;
virtual size_t uncommit(size_t offset, size_t size) = 0;
virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0;
virtual void unmap(uintptr_t addr, size_t size) const = 0;
};
// Implements small pages (paged) support using placeholder reservation.
//
// The backing commits and uncommits physical memory, which can be
// multi-mapped into the virtual address space. To support fine-grained
// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
// a separate paging file mapping.
class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl {
private:
ZGranuleMap<HANDLE> _handles;
HANDLE get_handle(uintptr_t offset) const {
HANDLE const handle = _handles.get(offset);
assert(handle != 0, "Should be set");
return handle;
}
void put_handle(uintptr_t offset, HANDLE handle) {
assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
assert(_handles.get(offset) == 0, "Should be cleared");
_handles.put(offset, handle);
}
void clear_handle(uintptr_t offset) {
assert(_handles.get(offset) != 0, "Should be set");
_handles.put(offset, 0);
}
public:
ZPhysicalMemoryBackingSmallPages(size_t max_capacity) :
ZPhysicalMemoryBackingImpl(),
_handles(max_capacity) {}
size_t commit(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
if (handle == 0) {
return i;
}
put_handle(offset + i, handle);
}
return size;
}
size_t uncommit(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = get_handle(offset + i);
clear_handle(offset + i);
ZMapper::close_paging_file_mapping(handle);
}
return size;
}
void map(uintptr_t addr, size_t size, size_t offset) const {
assert(is_aligned(offset, ZGranuleSize), "Misaligned");
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
assert(is_aligned(size, ZGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = get_handle(offset + i);
ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
}
}
void unmap(uintptr_t addr, size_t size) const {
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
assert(is_aligned(size, ZGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += ZGranuleSize) {
ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
}
}
};
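The multi-mapping described in the class comment rests on a plain Win32 property: one pagefile-backed section can be mapped at several virtual addresses at once. A stripped-down sketch of that property follows; the real code maps views at fixed addresses into placeholders via MapViewOfFile3, whereas this sketch lets the OS pick the addresses:

#include <windows.h>

// Illustration only, not this file's code: two views of one section.
static void multimap_example() {
  const SIZE_T size = 2 * 1024 * 1024; // one hypothetical 2M granule

  // Pagefile-backed section; INVALID_HANDLE_VALUE means "no file"
  HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
                                      PAGE_READWRITE, 0, (DWORD)size, NULL);
  if (section == NULL) {
    return;
  }

  // Two views of the same physical memory
  void* view0 = MapViewOfFile(section, FILE_MAP_ALL_ACCESS, 0, 0, size);
  void* view1 = MapViewOfFile(section, FILE_MAP_ALL_ACCESS, 0, 0, size);

  if (view0 != NULL && view1 != NULL) {
    *(int*)view0 = 42; // a write through one view...
    // ...is visible through the other: *(int*)view1 == 42
  }

  if (view0 != NULL) UnmapViewOfFile(view0);
  if (view1 != NULL) UnmapViewOfFile(view1);
  CloseHandle(section);
}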
// Implements Large Pages (locked) support using shared AWE physical memory.
//
// Shared AWE physical memory also works with small pages, but it has
// a few drawbacks that make it a no-go at this point:
//
// 1) It seems to use 8 bytes of committed memory per *reserved* memory.
// Given our scheme of using a large address space range, this turns out to
// use too much memory.
//
// 2) It requires memory locking privileges, even for small pages. This
// has always been a requirement for large pages, and would be an extra
// restriction for usage with small pages.
//
// Note: The large page size is tied to our ZGranuleSize.
extern HANDLE ZAWESection;
class ZPhysicalMemoryBackingLargePages : public ZPhysicalMemoryBackingImpl {
private:
ULONG_PTR* const _page_array;
static ULONG_PTR* alloc_page_array(size_t max_capacity) {
const size_t npages = max_capacity / ZGranuleSize;
const size_t array_size = npages * sizeof(ULONG_PTR);
return (ULONG_PTR*)os::malloc(array_size, mtGC);
}
public:
ZPhysicalMemoryBackingLargePages(size_t max_capacity) :
ZPhysicalMemoryBackingImpl(),
_page_array(alloc_page_array(max_capacity)) {}
size_t commit(size_t offset, size_t size) {
const size_t index = offset >> ZGranuleSizeShift;
const size_t npages = size >> ZGranuleSizeShift;
size_t npages_res = npages;
const bool res = AllocateUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
if (!res) {
fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
size / M, offset, GetLastError());
} else {
log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset);
}
    // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory.
    // The allocated number of pages is written to npages_res.
return npages_res << ZGranuleSizeShift;
}
size_t uncommit(size_t offset, size_t size) {
const size_t index = offset >> ZGranuleSizeShift;
const size_t npages = size >> ZGranuleSizeShift;
size_t npages_res = npages;
const bool res = FreeUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
if (!res) {
fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
size, offset, GetLastError());
}
return npages_res << ZGranuleSizeShift;
}
void map(uintptr_t addr, size_t size, size_t offset) const {
const size_t npages = size >> ZGranuleSizeShift;
const size_t index = offset >> ZGranuleSizeShift;
const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]);
if (!res) {
fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
addr, size / M, offset, GetLastError());
}
}
void unmap(uintptr_t addr, size_t size) const {
const size_t npages = size >> ZGranuleSizeShift;
const bool res = MapUserPhysicalPages((char*)addr, npages, NULL);
if (!res) {
fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
addr, size / M, GetLastError());
}
}
};
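For readers unfamiliar with AWE: the code above uses the shared-AWE variant, where the ZAWESection handle is passed in place of the usual process handle so that physical pages can be mapped at addresses reserved against that section. The classic, documented per-process flow looks like the sketch below (hypothetical sizes; requires the lock-memory privilege):

#include <windows.h>

// Illustration of classic AWE, not ZGC code.
static void classic_awe_example() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);

  const ULONG_PTR npages = 16;
  const SIZE_T size = npages * si.dwPageSize;

  // Reserve address space that physical pages can be mapped into
  void* addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE);

  // Grab page frames (needs "Lock pages in memory")
  ULONG_PTR frames[16];
  ULONG_PTR n = npages;
  if (addr == NULL || !AllocateUserPhysicalPages(GetCurrentProcess(), &n, frames)) {
    return;
  }

  // Attach the frames at the reserved address...
  MapUserPhysicalPages(addr, n, frames);

  // ...and detach them again by passing NULL as the frame array,
  // which is exactly what unmap() above does
  MapUserPhysicalPages(addr, n, NULL);

  FreeUserPhysicalPages(GetCurrentProcess(), &n, frames);
  VirtualFree(addr, 0, MEM_RELEASE);
}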
static ZPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
if (ZLargePages::is_enabled()) {
return new ZPhysicalMemoryBackingLargePages(max_capacity);
}
return new ZPhysicalMemoryBackingSmallPages(max_capacity);
}
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
-    _handles(max_capacity) {}
+    _impl(select_impl(max_capacity)) {}
bool ZPhysicalMemoryBacking::is_initialized() const {
return true;
@@ -46,76 +222,31 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}
-HANDLE ZPhysicalMemoryBacking::get_handle(uintptr_t offset) const {
-  HANDLE const handle = _handles.get(offset);
-  assert(handle != 0, "Should be set");
-  return handle;
-}
-
-void ZPhysicalMemoryBacking::put_handle(uintptr_t offset, HANDLE handle) {
-  assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
-  assert(_handles.get(offset) == 0, "Should be cleared");
-  _handles.put(offset, handle);
-}
-
-void ZPhysicalMemoryBacking::clear_handle(uintptr_t offset) {
-  assert(_handles.get(offset) != 0, "Should be set");
-  _handles.put(offset, 0);
-}
-
-size_t ZPhysicalMemoryBacking::commit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
-    if (handle == 0) {
-      return i;
-    }
-    put_handle(offset + i, handle);
-  }
-  return size;
-}
-
-size_t ZPhysicalMemoryBacking::uncommit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    clear_handle(offset + i);
-    ZMapper::close_paging_file_mapping(handle);
-  }
-  return size;
-}
-
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

-  return commit_from_paging_file(offset, length);
+  return _impl->commit(offset, length);
}

size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

-  return uncommit_from_paging_file(offset, length);
+  return _impl->uncommit(offset, length);
}

void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
-  assert(is_aligned(offset, ZGranuleSize), "Misaligned");
-  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
-  assert(is_aligned(size, ZGranuleSize), "Misaligned");
-
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
-  }
+  assert(is_aligned(offset, ZGranuleSize), "Misaligned: " PTR_FORMAT, offset);
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned: " PTR_FORMAT, addr);
+  assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size);
+
+  _impl->map(addr, size, offset);
}

void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
-  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
-  assert(is_aligned(size, ZGranuleSize), "Misaligned");
-
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
-  }
+  _impl->unmap(addr, size);
}

src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp

@@ -24,20 +24,15 @@
#ifndef OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
#define OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP

-#include "gc/z/zGranuleMap.hpp"
#include "utilities/globalDefinitions.hpp"

#include <Windows.h>

+class ZPhysicalMemoryBackingImpl;
+
class ZPhysicalMemoryBacking {
private:
-  ZGranuleMap<HANDLE> _handles;
-
-  HANDLE get_handle(uintptr_t offset) const;
-  void put_handle(uintptr_t offset, HANDLE handle);
-  void clear_handle(uintptr_t offset);
-
-  size_t commit_from_paging_file(size_t offset, size_t size);
-  size_t uncommit_from_paging_file(size_t offset, size_t size);
+  ZPhysicalMemoryBackingImpl* _impl;

public:
  ZPhysicalMemoryBacking(size_t max_capacity);
src/hotspot/os/windows/gc/z/zSyscall_windows.cpp

@@ -28,42 +28,66 @@
#include "runtime/os.hpp"

ZSyscall::CreateFileMappingWFn ZSyscall::CreateFileMappingW;
+ZSyscall::CreateFileMapping2Fn ZSyscall::CreateFileMapping2;
ZSyscall::VirtualAlloc2Fn ZSyscall::VirtualAlloc2;
ZSyscall::VirtualFreeExFn ZSyscall::VirtualFreeEx;
ZSyscall::MapViewOfFile3Fn ZSyscall::MapViewOfFile3;
ZSyscall::UnmapViewOfFile2Fn ZSyscall::UnmapViewOfFile2;

-template <typename Fn>
-static void lookup_symbol(Fn*& fn, const char* library, const char* symbol) {
+static void* lookup_kernelbase_library() {
+  const char* const name = "KernelBase";
+
  char ebuf[1024];
-  void* const handle = os::dll_load(library, ebuf, sizeof(ebuf));
+  void* const handle = os::dll_load(name, ebuf, sizeof(ebuf));
  if (handle == NULL) {
-    log_error_p(gc)("Failed to load library: %s", library);
-    vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
+    log_error_p(gc)("Failed to load library: %s", name);
  }

-  fn = reinterpret_cast<Fn*>(os::dll_lookup(handle, symbol));
+  return handle;
+}
+
+static void* lookup_kernelbase_symbol(const char* name) {
+  static void* const handle = lookup_kernelbase_library();
+  if (handle == NULL) {
+    return NULL;
+  }
+
+  return os::dll_lookup(handle, name);
+}
+
+static bool has_kernelbase_symbol(const char* name) {
+  return lookup_kernelbase_symbol(name) != NULL;
+}
+
+template <typename Fn>
+static void install_kernelbase_symbol(Fn*& fn, const char* name) {
+  fn = reinterpret_cast<Fn*>(lookup_kernelbase_symbol(name));
+}
+
+template <typename Fn>
+static void install_kernelbase_1803_symbol_or_exit(Fn*& fn, const char* name) {
+  install_kernelbase_symbol(fn, name);
  if (fn == NULL) {
-    log_error_p(gc)("Failed to lookup symbol: %s", symbol);
+    log_error_p(gc)("Failed to lookup symbol: %s", name);
    vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
  }
}

void ZSyscall::initialize() {
-  lookup_symbol(CreateFileMappingW, "KernelBase", "CreateFileMappingW");
-  lookup_symbol(VirtualAlloc2, "KernelBase", "VirtualAlloc2");
-  lookup_symbol(VirtualFreeEx, "KernelBase", "VirtualFreeEx");
-  lookup_symbol(MapViewOfFile3, "KernelBase", "MapViewOfFile3");
-  lookup_symbol(UnmapViewOfFile2, "KernelBase", "UnmapViewOfFile2");
+  // Required
+  install_kernelbase_1803_symbol_or_exit(CreateFileMappingW, "CreateFileMappingW");
+  install_kernelbase_1803_symbol_or_exit(VirtualAlloc2, "VirtualAlloc2");
+  install_kernelbase_1803_symbol_or_exit(VirtualFreeEx, "VirtualFreeEx");
+  install_kernelbase_1803_symbol_or_exit(MapViewOfFile3, "MapViewOfFile3");
+  install_kernelbase_1803_symbol_or_exit(UnmapViewOfFile2, "UnmapViewOfFile2");
+
+  // Optional - for large pages support
+  install_kernelbase_symbol(CreateFileMapping2, "CreateFileMapping2");
}

bool ZSyscall::is_supported() {
-  char ebuf[1024];
-  void* const handle = os::dll_load("KernelBase", ebuf, sizeof(ebuf));
-  if (handle == NULL) {
-    assert(false, "Failed to load library: KernelBase");
-    return false;
-  }
-
-  return os::dll_lookup(handle, "VirtualAlloc2") != NULL;
+  // Available in Windows version 1803 and later
+  return has_kernelbase_symbol("VirtualAlloc2");
+}
+
+bool ZSyscall::is_large_pages_supported() {
+  // Available in Windows version 1809 and later
+  return has_kernelbase_symbol("CreateFileMapping2");
}
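The os::dll_load/os::dll_lookup calls above boil down to the usual Win32 late-binding idiom. A sketch of the raw equivalent for one symbol, for illustration only; HotSpot code should keep going through the os:: wrappers:

#include <windows.h>

typedef PVOID (WINAPI *VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                        MEM_EXTENDED_PARAMETER*, ULONG);

static VirtualAlloc2Fn lookup_virtual_alloc2() {
  // KernelBase.dll exports VirtualAlloc2 on Windows 10 1803 and later;
  // on older systems GetProcAddress simply returns NULL
  HMODULE module = GetModuleHandleW(L"KernelBase");
  if (module == NULL) {
    return NULL;
  }
  return (VirtualAlloc2Fn)GetProcAddress(module, "VirtualAlloc2");
}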

src/hotspot/os/windows/gc/z/zSyscall_windows.hpp

@@ -32,6 +32,7 @@
class ZSyscall {
private:
  typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR);
+  typedef HANDLE (*CreateFileMapping2Fn)(HANDLE, LPSECURITY_ATTRIBUTES, ULONG, ULONG, ULONG, ULONG64, PCWSTR, PMEM_EXTENDED_PARAMETER, ULONG);
  typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
  typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD);
  typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
@@ -39,6 +40,7 @@ private:
public:
  static CreateFileMappingWFn CreateFileMappingW;
+  static CreateFileMapping2Fn CreateFileMapping2;
  static VirtualAlloc2Fn VirtualAlloc2;
  static VirtualFreeExFn VirtualFreeEx;
  static MapViewOfFile3Fn MapViewOfFile3;
@@ -47,6 +49,7 @@ public:
  static void initialize();

  static bool is_supported();
+  static bool is_large_pages_supported();
};
#endif // OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP

src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp

@@ -24,105 +24,172 @@
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
+#include "gc/z/zSyscall_windows.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

-static void split_placeholder(uintptr_t start, size_t size) {
-  ZMapper::split_placeholder(ZAddress::marked0(start), size);
-  ZMapper::split_placeholder(ZAddress::marked1(start), size);
-  ZMapper::split_placeholder(ZAddress::remapped(start), size);
-}
-
-static void coalesce_placeholders(uintptr_t start, size_t size) {
-  ZMapper::coalesce_placeholders(ZAddress::marked0(start), size);
-  ZMapper::coalesce_placeholders(ZAddress::marked1(start), size);
-  ZMapper::coalesce_placeholders(ZAddress::remapped(start), size);
-}
-
-static void split_into_placeholder_granules(uintptr_t start, size_t size) {
-  for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) {
-    split_placeholder(addr, ZGranuleSize);
-  }
-}
-
-static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-
-  if (size > ZGranuleSize) {
-    coalesce_placeholders(start, size);
-  }
-}
-
-static void create_callback(const ZMemory* area) {
-  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-  coalesce_into_one_placeholder(area->start(), area->size());
-}
-
-static void destroy_callback(const ZMemory* area) {
-  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-  // Don't try to split the last granule - VirtualFree will fail
-  split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
-}
-
-static void shrink_from_front_callback(const ZMemory* area, size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-  split_into_placeholder_granules(area->start(), size);
-}
-
-static void shrink_from_back_callback(const ZMemory* area, size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-  // Don't try to split the last granule - VirtualFree will fail
-  split_into_placeholder_granules(area->end() - size, size - ZGranuleSize);
-}
-
-static void grow_from_front_callback(const ZMemory* area, size_t size) {
-  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-  coalesce_into_one_placeholder(area->start() - size, area->size() + size);
-}
-
-static void grow_from_back_callback(const ZMemory* area, size_t size) {
-  assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-  coalesce_into_one_placeholder(area->start(), area->size() + size);
-}
-
-void ZVirtualMemoryManager::pd_initialize() {
-  // Each reserved virtual memory address area registered in _manager is
-  // exactly covered by a single placeholder. Callbacks are installed so
-  // that whenever a memory area changes, the corresponding placeholder
-  // is adjusted.
-  //
-  // The create and grow callbacks are called when virtual memory is
-  // returned to the memory manager. The new memory area is then covered
-  // by a new single placeholder.
-  //
-  // The destroy and shrink callbacks are called when virtual memory is
-  // allocated from the memory manager. The memory area is then split
-  // into granule-sized placeholders.
-  //
-  // See comment in zMapper_windows.cpp explaining why placeholders are
-  // split into ZGranuleSize sized placeholders.
-  ZMemoryManager::Callbacks callbacks;
-  callbacks._create = &create_callback;
-  callbacks._destroy = &destroy_callback;
-  callbacks._shrink_from_front = &shrink_from_front_callback;
-  callbacks._shrink_from_back = &shrink_from_back_callback;
-  callbacks._grow_from_front = &grow_from_front_callback;
-  callbacks._grow_from_back = &grow_from_back_callback;
-  _manager.register_callbacks(callbacks);
-}
+class ZVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
+public:
+  virtual void initialize_before_reserve() {}
+  virtual void initialize_after_reserve(ZMemoryManager* manager) {}
+  virtual bool reserve(uintptr_t addr, size_t size) = 0;
+  virtual void unreserve(uintptr_t addr, size_t size) = 0;
+};
+
+// Implements small pages (paged) support using placeholder reservation.
+class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
+private:
+  class PlaceholderCallbacks : public AllStatic {
+  public:
+    static void split_placeholder(uintptr_t start, size_t size) {
+      ZMapper::split_placeholder(ZAddress::marked0(start), size);
+      ZMapper::split_placeholder(ZAddress::marked1(start), size);
+      ZMapper::split_placeholder(ZAddress::remapped(start), size);
+    }
+
+    static void coalesce_placeholders(uintptr_t start, size_t size) {
+      ZMapper::coalesce_placeholders(ZAddress::marked0(start), size);
+      ZMapper::coalesce_placeholders(ZAddress::marked1(start), size);
+      ZMapper::coalesce_placeholders(ZAddress::remapped(start), size);
+    }
+
+    static void split_into_placeholder_granules(uintptr_t start, size_t size) {
+      for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) {
+        split_placeholder(addr, ZGranuleSize);
+      }
+    }
+
+    static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
+      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+
+      if (size > ZGranuleSize) {
+        coalesce_placeholders(start, size);
+      }
+    }
+
+    static void create_callback(const ZMemory* area) {
+      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+      coalesce_into_one_placeholder(area->start(), area->size());
+    }
+
+    static void destroy_callback(const ZMemory* area) {
+      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+      // Don't try to split the last granule - VirtualFree will fail
+      split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
+    }
+
+    static void shrink_from_front_callback(const ZMemory* area, size_t size) {
+      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+      split_into_placeholder_granules(area->start(), size);
+    }
+
+    static void shrink_from_back_callback(const ZMemory* area, size_t size) {
+      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+      // Don't try to split the last granule - VirtualFree will fail
+      split_into_placeholder_granules(area->end() - size, size - ZGranuleSize);
+    }
+
+    static void grow_from_front_callback(const ZMemory* area, size_t size) {
+      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+      coalesce_into_one_placeholder(area->start() - size, area->size() + size);
+    }
+
+    static void grow_from_back_callback(const ZMemory* area, size_t size) {
+      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+      coalesce_into_one_placeholder(area->start(), area->size() + size);
+    }
+
+    static void register_with(ZMemoryManager* manager) {
+      // Each reserved virtual memory address area registered in _manager is
+      // exactly covered by a single placeholder. Callbacks are installed so
+      // that whenever a memory area changes, the corresponding placeholder
+      // is adjusted.
+      //
+      // The create and grow callbacks are called when virtual memory is
+      // returned to the memory manager. The new memory area is then covered
+      // by a new single placeholder.
+      //
+      // The destroy and shrink callbacks are called when virtual memory is
+      // allocated from the memory manager. The memory area is then split
+      // into granule-sized placeholders.
+      //
+      // See comment in zMapper_windows.cpp explaining why placeholders are
+      // split into ZGranuleSize sized placeholders.
+      ZMemoryManager::Callbacks callbacks;
+      callbacks._create = &create_callback;
+      callbacks._destroy = &destroy_callback;
+      callbacks._shrink_from_front = &shrink_from_front_callback;
+      callbacks._shrink_from_back = &shrink_from_back_callback;
+      callbacks._grow_from_front = &grow_from_front_callback;
+      callbacks._grow_from_back = &grow_from_back_callback;
+      manager->register_callbacks(callbacks);
+    }
+  };
+
+  virtual void initialize_after_reserve(ZMemoryManager* manager) {
+    PlaceholderCallbacks::register_with(manager);
+  }
+
+  virtual bool reserve(uintptr_t addr, size_t size) {
+    const uintptr_t res = ZMapper::reserve(addr, size);
+
+    assert(res == addr || res == NULL, "Should not reserve other memory than requested");
+    return res == addr;
+  }
+
+  virtual void unreserve(uintptr_t addr, size_t size) {
+    ZMapper::unreserve(addr, size);
+  }
+};
+
+// Implements Large Pages (locked) support using shared AWE physical memory.
+
+// ZPhysicalMemory layer needs access to the section
+HANDLE ZAWESection;
+
+class ZVirtualMemoryManagerLargePages : public ZVirtualMemoryManagerImpl {
+private:
+  virtual void initialize_before_reserve() {
+    ZAWESection = ZMapper::create_shared_awe_section();
+  }
+
+  virtual bool reserve(uintptr_t addr, size_t size) {
+    const uintptr_t res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size);
+
+    assert(res == addr || res == NULL, "Should not reserve other memory than requested");
+    return res == addr;
+  }
+
+  virtual void unreserve(uintptr_t addr, size_t size) {
+    ZMapper::unreserve_for_shared_awe(addr, size);
+  }
+};
+
+static ZVirtualMemoryManagerImpl* _impl = NULL;
+
+void ZVirtualMemoryManager::pd_initialize_before_reserve() {
+  if (ZLargePages::is_enabled()) {
+    _impl = new ZVirtualMemoryManagerLargePages();
+  } else {
+    _impl = new ZVirtualMemoryManagerSmallPages();
+  }
+
+  _impl->initialize_before_reserve();
+}
+
+void ZVirtualMemoryManager::pd_initialize_after_reserve() {
+  _impl->initialize_after_reserve(&_manager);
+}

bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
-  uintptr_t res = ZMapper::reserve(addr, size);
-
-  assert(res == addr || res == NULL, "Should not reserve other memory than requested");
-  return res == addr;
+  return _impl->reserve(addr, size);
}

void ZVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
-  ZMapper::unreserve(addr, size);
+  _impl->unreserve(addr, size);
}
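The split/coalesce callbacks above ultimately drive three documented placeholder operations. A self-contained sketch of those primitives follows, for illustration only; it assumes a Windows 10 1803+ target, where calling VirtualAlloc2 directly may also require linking against mincore or onecore:

#include <windows.h>

// Illustration of the placeholder primitives, not this file's code.
static void placeholder_example() {
  const SIZE_T granule = 2 * 1024 * 1024; // hypothetical 2M granule
  const SIZE_T size = 4 * granule;

  // One placeholder covering the whole range
  char* base = (char*)VirtualAlloc2(GetCurrentProcess(), NULL, size,
                                    MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                    PAGE_NOACCESS, NULL, 0);
  if (base == NULL) {
    return;
  }

  // Split: carve the first granule into its own placeholder
  VirtualFree(base, granule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

  // Coalesce: merge the two placeholders back into one
  VirtualFree(base, size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);

  // Release the address space entirely
  VirtualFree(base, 0, MEM_RELEASE);
}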

src/hotspot/share/gc/z/zVirtualMemory.cpp

@@ -42,14 +42,17 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
    return;
  }

+  // Initialize platform specific parts before reserving address space
+  pd_initialize_before_reserve();
+
  // Reserve address space
  if (!reserve(max_capacity)) {
    log_error_pd(gc)("Failed to reserve enough address space for Java heap");
    return;
  }

-  // Initialize platform specific parts
-  pd_initialize();
+  // Initialize platform specific parts after reserving address space
+  pd_initialize_after_reserve();

  // Successfully initialized
  _initialized = true;
src/hotspot/share/gc/z/zVirtualMemory.hpp

@@ -51,7 +51,8 @@ private:
  bool _initialized;

  // Platform specific implementation
-  void pd_initialize();
+  void pd_initialize_before_reserve();
+  void pd_initialize_after_reserve();
  bool pd_reserve(uintptr_t addr, size_t size);
  void pd_unreserve(uintptr_t addr, size_t size);