8330626: ZGC: Windows address space placeholders not managed correctly

Reviewed-by: stefank, aboldtch
Stefan Johansson 2024-04-24 12:03:30 +00:00
parent e923dfe4c5
commit e311ba32a5
3 changed files with 232 additions and 9 deletions
src/hotspot
test/hotspot/gtest/gc/z

@@ -40,6 +40,11 @@ public:
};
// Implements small pages (paged) support using placeholder reservation.
+//
+// When a memory area is free (kept by the virtual memory manager) a
+// single placeholder is covering that memory area. When memory is
+// allocated from the manager the placeholder is split into granule
+// sized placeholders to allow mapping operations on that granularity.
class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
private:
class PlaceholderCallbacks : public AllStatic {
@@ -52,49 +57,87 @@ private:
ZMapper::coalesce_placeholders(ZOffset::address_unsafe(start), size);
}
-static void split_into_placeholder_granules(zoffset start, size_t size) {
-  for (uintptr_t addr = untype(start); addr < untype(start) + size; addr += ZGranuleSize) {
-    split_placeholder(to_zoffset(addr), ZGranuleSize);
+// Turn the single placeholder covering the memory area into granule
+// sized placeholders.
+static void split_into_granule_sized_placeholders(zoffset start, size_t size) {
+  assert(size >= ZGranuleSize, "Must be at least one granule");
+  assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+  // Don't call split_placeholder on the last granule, since it is already
+  // a placeholder and the system call would therefore fail.
+  const size_t limit = size - ZGranuleSize;
+  for (size_t offset = 0; offset < limit; offset += ZGranuleSize) {
+    split_placeholder(start + offset, ZGranuleSize);
}
}
static void coalesce_into_one_placeholder(zoffset start, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+// Granule sized areas are already covered by a single placeholder
if (size > ZGranuleSize) {
coalesce_placeholders(start, size);
}
}
+// Called when a memory area is returned to the memory manager but can't
+// be merged with an already existing area. Make sure this area is covered
+// by a single placeholder.
static void create_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size());
}
+// Called when a complete memory area in the memory manager is allocated.
+// Create granule sized placeholders for the entire area.
static void destroy_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-// Don't try split the last granule - VirtualFree will fail
-split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
+split_into_granule_sized_placeholders(area->start(), area->size());
}
+// Called when a memory area is allocated at the front of an existing memory area.
+// Turn the first part of the memory area into granule sized placeholders.
static void shrink_from_front_callback(const ZMemory* area, size_t size) {
assert(area->size() > size, "Must be larger than what we try to split out");
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-split_into_placeholder_granules(area->start(), size);
+// Split the area into two placeholders
+split_placeholder(area->start(), size);
+// Split the first part into granule sized placeholders
+split_into_granule_sized_placeholders(area->start(), size);
}
+// Called when a memory area is allocated at the end of an existing memory area.
+// Turn the second part of the memory area into granule sized placeholders.
static void shrink_from_back_callback(const ZMemory* area, size_t size) {
assert(area->size() > size, "Must be larger than what we try to split out");
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-// Don't try split the last granule - VirtualFree will fail
-split_into_placeholder_granules(to_zoffset(untype(area->end()) - size), size - ZGranuleSize);
+// Split the area into two placeholders
+const zoffset start = to_zoffset(area->end() - size);
+split_placeholder(start, size);
+// Split the second part into granule sized placeholders
+split_into_granule_sized_placeholders(start, size);
}
+// Called when freeing a memory area and it can be merged at the start of an
+// existing area. Coalesce the underlying placeholders into one.
static void grow_from_front_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-coalesce_into_one_placeholder(to_zoffset(untype(area->start()) - size), area->size() + size);
+const zoffset start = area->start() - size;
+coalesce_into_one_placeholder(start, area->size() + size);
}
+// Called when freeing a memory area and it can be merged at the end of an
+// existing area. Coalesce the underlying placeholders into one.
static void grow_from_back_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size() + size);
}

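For context (not part of the change): split_placeholder and coalesce_placeholders wrap the Windows placeholder primitives, VirtualFree with MEM_PRESERVE_PLACEHOLDER and MEM_COALESCE_PLACEHOLDERS. The comment "the system call would therefore fail" above refers to the restriction that a placeholder can only be split along a strict sub-range, so releasing a range that exactly covers a whole placeholder with MEM_PRESERVE_PLACEHOLDER fails. A minimal standalone sketch, not ZGC code (the 2M granule size mirrors ZGranuleSize; linking against onecore.lib is assumed for VirtualAlloc2):

// Standalone sketch: why "splitting" the last granule must be skipped.
#include <windows.h>
#include <cstdio>

int main() {
  const SIZE_T granule = 2 * 1024 * 1024; // assumed granule size, like ZGranuleSize

  // Reserve a single placeholder covering two granules
  void* addr = VirtualAlloc2(GetCurrentProcess(), nullptr, 2 * granule,
                             MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                             PAGE_NOACCESS, nullptr, 0);

  // Splitting off the first granule succeeds and leaves two placeholders
  BOOL ok = VirtualFree(addr, granule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
  printf("split first granule: %d\n", ok); // prints 1

  // "Splitting" the second granule fails - it is already a whole placeholder
  ok = VirtualFree((char*)addr + granule, granule,
                   MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
  printf("split last granule: %d\n", ok); // prints 0

  // Coalescing the two placeholders back into one succeeds
  ok = VirtualFree(addr, 2 * granule, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
  printf("coalesce: %d\n", ok); // prints 1

  VirtualFree(addr, 0, MEM_RELEASE); // release the placeholder
  return 0;
}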
@@ -47,6 +47,8 @@ public:
};
class ZVirtualMemoryManager {
+friend class ZMapperTest;
private:
static size_t calculate_min_range(size_t size);

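The friend declaration gives the new gtest access to ZVirtualMemoryManager internals: the test below constructs the embedded _manager in place and calls the pd_* reservation functions directly. For orientation only, the PlaceholderCallbacks from the first file are handed to this ZMemoryManager so it can notify the platform layer as areas are created, destroyed, shrunk, and grown. A rough sketch of that wiring; the Callbacks member names and register_callbacks are assumptions, not taken from this diff:

// Sketch only - struct layout and registration call are assumptions
static void register_with(ZMemoryManager* manager) {
  ZMemoryManager::Callbacks callbacks;
  callbacks._create            = &create_callback;
  callbacks._destroy           = &destroy_callback;
  callbacks._shrink_from_front = &shrink_from_front_callback;
  callbacks._shrink_from_back  = &shrink_from_back_callback;
  callbacks._grow_from_front   = &grow_from_front_callback;
  callbacks._grow_from_back    = &grow_from_back_callback;
  manager->register_callbacks(callbacks);
}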
@@ -0,0 +1,178 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#ifdef _WINDOWS
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zMemory.inline.hpp"
#include "gc/z/zSyscall_windows.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
using namespace testing;
#define EXPECT_ALLOC_OK(offset) EXPECT_NE(offset, zoffset(UINTPTR_MAX))
class ZMapperTest : public Test {
private:
static constexpr size_t ZMapperTestReservationSize = 32 * M;
static bool _initialized;
static ZMemoryManager* _va;
ZVirtualMemoryManager* _vmm;
public:
bool reserve_for_test() {
// Initialize platform specific parts before reserving address space
_vmm->pd_initialize_before_reserve();
// Reserve address space
if (!_vmm->pd_reserve(ZOffset::address_unsafe(zoffset(0)), ZMapperTestReservationSize)) {
return false;
}
// Make the address range free before setting up callbacks below
_va->free(zoffset(0), ZMapperTestReservationSize);
// Initialize platform specific parts after reserving address space
_vmm->pd_initialize_after_reserve();
return true;
}
virtual void SetUp() {
ZSyscall::initialize();
ZGlobalsPointers::initialize();
// Fake a ZVirtualMemoryManager
_vmm = (ZVirtualMemoryManager*)os::malloc(sizeof(ZVirtualMemoryManager), mtTest);
// Construct its internal ZMemoryManager
_va = new (&_vmm->_manager) ZMemoryManager();
// Reserve address space for the test
if (!reserve_for_test()) {
// Failed to reserve address space
GTEST_SKIP();
return;
}
_initialized = true;
}
virtual void TearDown() {
if (_initialized) {
_vmm->pd_unreserve(ZOffset::address_unsafe(zoffset(0)), 0);
}
os::free(_vmm);
}
static void test_alloc_low_address() {
// Verify that we get placeholder for first granule
zoffset bottom = _va->alloc_low_address(ZGranuleSize);
EXPECT_ALLOC_OK(bottom);
_va->free(bottom, ZGranuleSize);
// Alloc something larger than a granule and free it
bottom = _va->alloc_low_address(ZGranuleSize * 3);
EXPECT_ALLOC_OK(bottom);
_va->free(bottom, ZGranuleSize * 3);
// Free with more memory allocated
bottom = _va->alloc_low_address(ZGranuleSize);
EXPECT_ALLOC_OK(bottom);
zoffset next = _va->alloc_low_address(ZGranuleSize);
EXPECT_ALLOC_OK(next);
_va->free(bottom, ZGranuleSize);
_va->free(next, ZGranuleSize);
}
static void test_alloc_high_address() {
// Verify that we get placeholder for last granule
zoffset high = _va->alloc_high_address(ZGranuleSize);
EXPECT_ALLOC_OK(high);
zoffset prev = _va->alloc_high_address(ZGranuleSize);
EXPECT_ALLOC_OK(prev);
_va->free(high, ZGranuleSize);
_va->free(prev, ZGranuleSize);
// Alloc something larger than a granule and return it
high = _va->alloc_high_address(ZGranuleSize * 2);
EXPECT_ALLOC_OK(high);
_va->free(high, ZGranuleSize * 2);
}
static void test_alloc_whole_area() {
// Alloc the whole reservation
zoffset bottom = _va->alloc_low_address(ZMapperTestReservationSize);
EXPECT_ALLOC_OK(bottom);
// Free two chunks and then allocate them again
_va->free(bottom, ZGranuleSize * 4);
_va->free(bottom + ZGranuleSize * 6, ZGranuleSize * 6);
zoffset offset = _va->alloc_low_address(ZGranuleSize * 4);
EXPECT_ALLOC_OK(offset);
offset = _va->alloc_low_address(ZGranuleSize * 6);
EXPECT_ALLOC_OK(offset);
// Now free it all, and verify it can be re-allocated
_va->free(bottom, ZMapperTestReservationSize);
bottom = _va->alloc_low_address(ZMapperTestReservationSize);
EXPECT_ALLOC_OK(bottom);
_va->free(bottom, ZMapperTestReservationSize);
}
};
bool ZMapperTest::_initialized = false;
ZMemoryManager* ZMapperTest::_va = nullptr;
TEST_VM_F(ZMapperTest, test_alloc_low_address) {
test_alloc_low_address();
}
TEST_VM_F(ZMapperTest, test_alloc_high_address) {
test_alloc_high_address();
}
TEST_VM_F(ZMapperTest, test_alloc_whole_area) {
test_alloc_whole_area();
}
#endif // _WINDOWS
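As a usage note, test_alloc_whole_area exercises the create and destroy callback paths end to end. Assuming ZGranuleSize is 2M (so the 32M test reservation spans 16 granules), the placeholder transitions look roughly like this (illustrative trace, not program output):

alloc_low_address(32M)         -> destroy_callback: split [0,16) into 16 granule placeholders
free(bottom, 4 granules)       -> create_callback:  coalesce [0,4) into one placeholder
free(bottom + 6, 6 granules)   -> create_callback:  coalesce [6,12) into one placeholder
alloc_low_address(4 granules)  -> destroy_callback: split [0,4) back into granules
alloc_low_address(6 granules)  -> destroy_callback: split [6,12) back into granules
free(bottom, 32M)              -> create_callback:  coalesce [0,16) into a single placeholder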