8307356: Metaspace: simplify BinList handling

Reviewed-by: rkennke, coleenp
Thomas Stuefe 2023-08-18 05:51:05 +00:00
parent 0299364d85
commit 891c3f4cca
11 changed files with 106 additions and 119 deletions

View File

@@ -1,6 +1,7 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* Copyright (c) 2023 Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,12 +39,8 @@ namespace metaspace {
// (only a few words). It is used to manage deallocated blocks - see
// class FreeBlocks.
// Memory blocks are kept in linked lists. Each list
// contains blocks of only one size. There is a list for blocks of two words,
// for blocks of three words, etc. The list heads are kept in a vector,
// ordered by block size.
// Memory blocks are kept in a vector of linked lists of equi-sized blocks:
//
// wordsize
//
// +---+ +---+ +---+ +---+
@@ -73,32 +70,28 @@ namespace metaspace {
// This structure is a bit expensive in memory costs (we pay one pointer per managed
// block size) so we only use it for a small number of sizes.
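
To make the simplified structure easier to follow, here is a minimal, self-contained sketch of the bin-list idea with a minimum block size of one word. All names are invented for illustration; this is not the JDK code, just the same technique: freed blocks are linked in place via a one-word node, with one LIFO list per block size.

    #include <cassert>
    #include <cstddef>
    #include <new>

    using Word = void*;   // stand-in for HotSpot's MetaWord

    template <int num_lists>
    class ToyBinList {
      // One-word node placed inside the free block itself.
      struct Block {
        Block* const _next;
        explicit Block(Block* next) : _next(next) {}
      };
      static_assert(sizeof(Block) == sizeof(Word), "node must fit in one word");

      Block* _blocks[num_lists] = {};

      // Minimum block size is one word: list i serves blocks of i + 1 words.
      static int index_for_word_size(size_t ws) { return (int)(ws - 1); }
      static size_t word_size_for_index(int i)  { return (size_t)i + 1; }

    public:
      void add_block(Word* p, size_t word_size) {
        assert(word_size >= 1 && word_size <= (size_t)num_lists);
        const int i = index_for_word_size(word_size);
        _blocks[i] = new (p) Block(_blocks[i]);   // link in place, LIFO
      }

      // Smallest-fit removal; reports the real block size via real_word_size.
      Word* remove_block(size_t word_size, size_t* real_word_size) {
        for (int i = index_for_word_size(word_size); i < num_lists; i++) {
          if (_blocks[i] != nullptr) {
            Block* b = _blocks[i];
            _blocks[i] = b->_next;
            *real_word_size = word_size_for_index(i);
            return (Word*)b;
          }
        }
        return nullptr;   // no block of at least word_size words
      }
    };
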
template <size_t smallest_word_size, int num_lists>
template <int num_lists>
class BinListImpl {
struct Block {
Block* const _next;
const size_t _word_size;
Block(Block* next, size_t word_size) :
_next(next),
_word_size(word_size)
{}
Block(Block* next) : _next(next) {}
};
#define BLOCK_FORMAT "Block @" PTR_FORMAT ": size: " SIZE_FORMAT ", next: " PTR_FORMAT
#define BLOCK_FORMAT_ARGS(b) p2i(b), (b)->_word_size, p2i((b)->_next)
#define BLOCK_FORMAT "Block @" PTR_FORMAT ": size: " SIZE_FORMAT ", next: " PTR_FORMAT
#define BLOCK_FORMAT_ARGS(b, sz) p2i(b), (sz), p2i((b)->_next)
// Smallest block size must be large enough to hold a Block structure.
STATIC_ASSERT(smallest_word_size * sizeof(MetaWord) >= sizeof(Block));
// Block size must be exactly one word.
STATIC_ASSERT(sizeof(Block) == BytesPerWord);
STATIC_ASSERT(num_lists > 0);
public:
// Minimal word size a block must have to be manageable by this structure.
const static size_t MinWordSize = smallest_word_size;
const static size_t MinWordSize = 1;
// Maximal (incl) word size a block can have to be manageable by this structure.
const static size_t MaxWordSize = MinWordSize + num_lists - 1;
const static size_t MaxWordSize = num_lists;
private:
@@ -106,15 +99,17 @@ private:
MemRangeCounter _counter;
// Given a word size, returns the index of the list holding blocks of that size
static int index_for_word_size(size_t word_size) {
int index = (int)(word_size - MinWordSize);
assert(index >= 0 && index < num_lists, "Invalid index %d", index);
return index;
}
// Given an index of a list, return the word size that list serves
static size_t word_size_for_index(int index) {
assert(index >= 0 && index < num_lists, "Invalid index %d", index);
return MinWordSize + index;
return index + MinWordSize;
}
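
With MinWordSize fixed at 1, the two mappings are plain inverses over [1, num_lists]: list i holds blocks of i + 1 words. A sanity loop one could imagine adding (a sketch only, using the names above):

    // Sketch: the mapping round-trips for every managed size.
    for (size_t ws = MinWordSize; ws <= MaxWordSize; ws++) {
      assert(word_size_for_index(index_for_word_size(ws)) == ws, "must round-trip");
    }
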
// Search the range [index, num_lists) for the smallest non-empty list. Returns -1 on failure.
@@ -127,6 +122,19 @@ private:
return i2 == num_lists ? -1 : i2;
}
#ifdef ASSERT
static const uintptr_t canary = 0xFFEEFFEE;
static void write_canary(MetaWord* p, size_t word_size) {
if (word_size > 1) { // 1-word-sized blocks have no space for a canary
((uintptr_t*)p)[word_size - 1] = canary;
}
}
static bool check_canary(const Block* b, size_t word_size) {
return word_size == 1 || // 1-word-sized blocks have no space for a canary
((const uintptr_t*)b)[word_size - 1] == canary;
}
#endif
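
Since a Block no longer records its own word size, debug builds tag the last word of every free block with a canary and re-check it on removal; this catches both overwrites of freed memory and deallocations made with a wrong size. A standalone demonstration of the idea (illustration only, not the JDK sources):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const uintptr_t canary = 0xFFEEFFEE;

    void write_canary(uintptr_t* block, size_t word_size) {
      if (word_size > 1) {              // a 1-word block is fully occupied by the list node
        block[word_size - 1] = canary;
      }
    }

    bool check_canary(const uintptr_t* block, size_t word_size) {
      return word_size == 1 || block[word_size - 1] == canary;
    }

    int main() {
      uintptr_t buf[4];
      write_canary(buf, 4);
      assert(check_canary(buf, 4));     // intact
      buf[3] = 0;                       // simulate an overwrite of free memory
      assert(!check_canary(buf, 4));    // detected
      return 0;
    }
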
public:
BinListImpl() {
@@ -138,9 +146,10 @@ public:
void add_block(MetaWord* p, size_t word_size) {
assert(word_size >= MinWordSize &&
word_size <= MaxWordSize, "bad block size");
DEBUG_ONLY(write_canary(p, word_size);)
const int index = index_for_word_size(word_size);
Block* old_head = _blocks[index];
Block* new_head = new(p)Block(old_head, word_size);
Block* new_head = new (p) Block(old_head);
_blocks[index] = new_head;
_counter.add(word_size);
}
@@ -156,9 +165,8 @@ public:
Block* b = _blocks[index];
const size_t real_word_size = word_size_for_index(index);
assert(b != nullptr, "Sanity");
assert(b->_word_size >= word_size &&
b->_word_size == real_word_size,
"bad block size in list[%d] (" BLOCK_FORMAT ")", index, BLOCK_FORMAT_ARGS(b));
assert(check_canary(b, real_word_size),
"bad block in list[%d] (" BLOCK_FORMAT ")", index, BLOCK_FORMAT_ARGS(b, real_word_size));
_blocks[index] = b->_next;
_counter.sub(real_word_size);
*p_real_word_size = real_word_size;
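
Because the size now lives only in the list structure, callers must take the real block size from p_real_word_size rather than from the block itself. A hedged usage sketch (bl stands for a BinList32; not code from this patch):

    size_t real_word_size = 0;
    MetaWord* p = bl.remove_block(5, &real_word_size);
    if (p != nullptr) {
      // Smallest-fit: real_word_size >= 5. Any excess is the caller's to handle;
      // FreeBlocks::remove_block below splits it off and re-adds it.
    }
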
@@ -181,12 +189,10 @@ public:
void verify() const {
MemRangeCounter local_counter;
for (int i = 0; i < num_lists; i++) {
const size_t s = MinWordSize + i;
const size_t s = word_size_for_index(i);
int pos = 0;
for (Block* b = _blocks[i]; b != nullptr; b = b->_next, pos++) {
assert(b->_word_size == s,
"bad block size in list[%d] at pos %d (" BLOCK_FORMAT ")",
i, pos, BLOCK_FORMAT_ARGS(b));
assert(check_canary(b, s), "");
local_counter.add(s);
}
}
@@ -196,7 +202,7 @@ public:
};
typedef BinListImpl<2, 32> BinList32;
typedef BinListImpl<32> BinList32;
} // namespace metaspace

View File

@@ -41,8 +41,7 @@ namespace metaspace {
// memory blocks themselves are the nodes, with the block size being the key.
//
// We store node pointer information in these blocks when storing them. That
// imposes a minimum size to the managed memory blocks.
// See get_raw_word_size_for_requested_word_size() (msCommon.hpp).
// imposes a minimum size on the managed memory blocks (1 word).
//
// We want to manage many memory blocks of the same size, but we want
// to prevent the tree from blowing up and degenerating into a list. Therefore

View File

@@ -31,7 +31,6 @@
namespace metaspace {
void FreeBlocks::add_block(MetaWord* p, size_t word_size) {
assert(word_size >= MinWordSize, "sanity (" SIZE_FORMAT ")", word_size);
if (word_size > MaxSmallBlocksWordSize) {
_tree.add_block(p, word_size);
} else {
@@ -40,8 +39,6 @@ void FreeBlocks::add_block(MetaWord* p, size_t word_size) {
}
MetaWord* FreeBlocks::remove_block(size_t requested_word_size) {
assert(requested_word_size >= MinWordSize,
"requested_word_size too small (" SIZE_FORMAT ")", requested_word_size);
size_t real_size = 0;
MetaWord* p = nullptr;
if (requested_word_size > MaxSmallBlocksWordSize) {
@@ -53,7 +50,7 @@ MetaWord* FreeBlocks::remove_block(size_t requested_word_size) {
// Blocks which are larger than a certain threshold are split and
// the remainder is handed back to the manager.
const size_t waste = real_size - requested_word_size;
if (waste > MinWordSize) {
if (waste >= MinWordSize) {
add_block(p + requested_word_size, waste);
}
}
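
With FreeBlocks::MinWordSize now 1, the split condition changes from > to >=: any nonzero remainder is storable, so nothing needs to be quietly dropped. A worked example of the difference (sizes in words; a sketch, not code from this patch):

    // A 5-word block satisfies a 4-word request; waste == 1.
    // Old: MinWordSize == 2, test was 'waste > MinWordSize'  -> 1-word remainder lost.
    // New: MinWordSize == 1, test is  'waste >= MinWordSize' -> remainder re-added:
    add_block(p + requested_word_size, waste);   // keeps the 1-word tail
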

View File

@@ -60,7 +60,7 @@ chunklevel_t MetaspaceArena::next_chunk_level() const {
void MetaspaceArena::salvage_chunk(Metachunk* c) {
assert_lock_strong(lock());
size_t remaining_words = c->free_below_committed_words();
if (remaining_words > FreeBlocks::MinWordSize) {
if (remaining_words >= FreeBlocks::MinWordSize) {
UL2(trace, "salvaging chunk " METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
@@ -101,6 +101,10 @@ Metachunk* MetaspaceArena::allocate_new_chunk(size_t requested_word_size) {
}
void MetaspaceArena::add_allocation_to_fbl(MetaWord* p, size_t word_size) {
assert(p != nullptr, "p is null");
assert_is_aligned_metaspace_pointer(p);
assert(word_size > 0, "zero sized");
if (_fbl == nullptr) {
_fbl = new FreeBlocks(); // Create only on demand
}
@@ -131,7 +135,7 @@ MetaspaceArena::~MetaspaceArena() {
#ifdef ASSERT
SOMETIMES(verify();)
if (Settings::use_allocation_guard()) {
SOMETIMES(verify_allocation_guards();)
verify_allocation_guards();
}
#endif
@@ -224,15 +228,16 @@ MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
UL2(trace, "requested " SIZE_FORMAT " words.", requested_word_size);
MetaWord* p = nullptr;
const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
const size_t aligned_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
// Before bothering the arena proper, attempt to re-use a block from the free blocks list
if (_fbl != nullptr && !_fbl->is_empty()) {
p = _fbl->remove_block(raw_word_size);
p = _fbl->remove_block(aligned_word_size);
if (p != nullptr) {
DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();)
UL2(trace, "taken from fbl (now: %d, " SIZE_FORMAT ").",
_fbl->count(), _fbl->total_size());
UL2(trace, "returning " PTR_FORMAT " - taken from fbl (now: %d, " SIZE_FORMAT ").",
p2i(p), _fbl->count(), _fbl->total_size());
assert_is_aligned_metaspace_pointer(p);
// Note: free blocks in freeblock dictionary still count as "used" as far as statistics go;
// therefore we have no need to adjust any usage counters (see epilogue of allocate_inner())
// and can just return here.
@@ -241,7 +246,7 @@ MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
}
// Primary allocation
p = allocate_inner(requested_word_size);
p = allocate_inner(aligned_word_size);
#ifdef ASSERT
// Fence allocation
@@ -264,11 +269,11 @@ MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
}
// Allocate from the arena proper, once dictionary allocations and fencing are sorted out.
MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
MetaWord* MetaspaceArena::allocate_inner(size_t word_size) {
assert_lock_strong(lock());
assert_is_aligned(word_size, metaspace::AllocationAlignmentWordSize);
const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
MetaWord* p = nullptr;
bool current_chunk_too_small = false;
bool commit_failure = false;
@@ -279,8 +284,8 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
// If the current chunk is too small to hold the requested size, attempt to enlarge it.
// If that fails, retire the chunk.
if (current_chunk()->free_words() < raw_word_size) {
if (!attempt_enlarge_current_chunk(raw_word_size)) {
if (current_chunk()->free_words() < word_size) {
if (!attempt_enlarge_current_chunk(word_size)) {
current_chunk_too_small = true;
} else {
DEBUG_ONLY(InternalStats::inc_num_chunks_enlarged();)
@@ -292,15 +297,15 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
// hit a limit (either GC threshold or MaxMetaspaceSize). In that case retire the
// chunk.
if (!current_chunk_too_small) {
if (!current_chunk()->ensure_committed_additional(raw_word_size)) {
UL2(info, "commit failure (requested size: " SIZE_FORMAT ")", raw_word_size);
if (!current_chunk()->ensure_committed_additional(word_size)) {
UL2(info, "commit failure (requested size: " SIZE_FORMAT ")", word_size);
commit_failure = true;
}
}
// Allocate from the current chunk. This should work now.
if (!current_chunk_too_small && !commit_failure) {
p = current_chunk()->allocate(raw_word_size);
p = current_chunk()->allocate(word_size);
assert(p != nullptr, "Allocation from chunk failed.");
}
}
@@ -310,12 +315,12 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
assert(current_chunk() == nullptr ||
current_chunk_too_small || commit_failure, "Sanity");
Metachunk* new_chunk = allocate_new_chunk(raw_word_size);
Metachunk* new_chunk = allocate_new_chunk(word_size);
if (new_chunk != nullptr) {
UL2(debug, "allocated new chunk " METACHUNK_FORMAT " for requested word size " SIZE_FORMAT ".",
METACHUNK_FORMAT_ARGS(new_chunk), requested_word_size);
METACHUNK_FORMAT_ARGS(new_chunk), word_size);
assert(new_chunk->free_below_committed_words() >= raw_word_size, "Sanity");
assert(new_chunk->free_below_committed_words() >= word_size, "Sanity");
// We have a new chunk. Before making it the current chunk, retire the old one.
if (current_chunk() != nullptr) {
@@ -326,10 +331,10 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
_chunks.add(new_chunk);
// Now, allocate from that chunk. That should work.
p = current_chunk()->allocate(raw_word_size);
p = current_chunk()->allocate(word_size);
assert(p != nullptr, "Allocation from chunk failed.");
} else {
UL2(info, "failed to allocate new chunk for requested word size " SIZE_FORMAT ".", requested_word_size);
UL2(info, "failed to allocate new chunk for requested word size " SIZE_FORMAT ".", word_size);
}
}
@@ -337,7 +342,7 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
InternalStats::inc_num_allocs_failed_limit();
} else {
DEBUG_ONLY(InternalStats::inc_num_allocs();)
_total_used_words_counter->increment_by(raw_word_size);
_total_used_words_counter->increment_by(word_size);
}
SOMETIMES(verify_locked();)
@@ -349,6 +354,9 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
_chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk()));
UL2(trace, "returning " PTR_FORMAT ".", p2i(p));
}
assert_is_aligned_metaspace_pointer(p);
return p;
}
@@ -365,7 +373,13 @@ void MetaspaceArena::deallocate_locked(MetaWord* p, size_t word_size) {
UL2(trace, "deallocating " PTR_FORMAT ", word size: " SIZE_FORMAT ".",
p2i(p), word_size);
// Only blocks that had been allocated via MetaspaceArena::allocate(size) must be handed in
// to MetaspaceArena::deallocate(), and only with the same size that had been originally used for allocation.
// Therefore the pointer must be aligned correctly, and size can be alignment-adjusted (the latter
// only matters on 32-bit):
assert_is_aligned_metaspace_pointer(p);
size_t raw_word_size = get_raw_word_size_for_requested_word_size(word_size);
add_allocation_to_fbl(p, raw_word_size);
SOMETIMES(verify_locked();)
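
Taken together, deallocate feeds blocks into the arena's free-block list and allocate consults that list first. A hedged round-trip sketch (arena is a MetaspaceArena; sizes in words; not code from this patch):

    MetaWord* p = arena->allocate(10);   // alignment-adjusted internally where needed
    arena->deallocate(p, 10);            // block lands in the free-block list (_fbl)
    MetaWord* q = arena->allocate(8);    // may be served from the fbl; a larger block
                                         // is split and the tail re-added (see above)
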

View File

@@ -119,6 +119,7 @@ class MetaspaceArena : public CHeapObj<mtClass> {
// Two eyecatchers to easily spot a corrupted _next pointer
const uintx _eye1;
const Fence* const _next;
NOT_LP64(uintx _dummy;)
const uintx _eye2;
public:
Fence(const Fence* next) : _eye1(EyeCatcher), _next(next), _eye2(EyeCatcher) {}
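
The NOT_LP64 dummy presumably pads Fence to a multiple of the 64-bit allocation alignment on 32-bit platforms, where uintx and pointers are 4 bytes each: without it the three fields occupy 12 bytes, with it 16. A sketch of the invariant one could assert (an assumption for illustration, not part of this patch):

    // 32-bit: _eye1(4) + _next(4) + _dummy(4) + _eye2(4) == 16 bytes == 2 * 8
    STATIC_ASSERT(sizeof(Fence) % AllocationAlignmentByteSize == 0);
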

View File

@@ -168,23 +168,5 @@ void print_number_of_classes(outputStream* out, uintx classes, uintx classes_sha
}
}
// Given a net allocation word size, return the raw word size we actually allocate.
// Note: externally visible for gtests.
//static
size_t get_raw_word_size_for_requested_word_size(size_t word_size) {
size_t byte_size = word_size * BytesPerWord;
// Deallocated metablocks are kept in a binlist which limits their minimal
// size to at least the size of a binlist item (2 words).
byte_size = MAX2(byte_size, FreeBlocks::MinWordSize * BytesPerWord);
// Metaspace allocations are aligned to word size.
byte_size = align_up(byte_size, AllocationAlignmentByteSize);
size_t raw_word_size = byte_size / BytesPerWord;
assert(raw_word_size * BytesPerWord == byte_size, "Sanity");
return raw_word_size;
}
} // namespace metaspace

View File

@@ -38,24 +38,27 @@ namespace metaspace {
// Metaspace allocation alignment:
// 1) Metaspace allocations have to be aligned such that 64bit values are aligned
// correctly.
// Metaspace allocations have to be aligned such that 64-bit values are aligned
// correctly. We currently don't hold members with a larger alignment requirement
// than 64-bit inside MetaData, so 8-byte alignment is enough.
//
// 2) Klass* structures allocated from Metaspace have to be aligned to KlassAlignmentInBytes.
// Klass* structures need to be aligned to KlassAlignmentInBytes, but since that is
// 64-bit, we don't need special handling for allocating Klass*.
//
// At the moment LogKlassAlignmentInBytes is 3, so KlassAlignmentInBytes == 8,
// so (1) and (2) can both be fulfilled with an alignment of 8. Should we increase
// KlassAlignmentInBytes at any time this will increase the necessary alignment as well. In
// that case we may think about introducing a separate alignment just for the class space
// since that alignment would only be needed for Klass structures.
// On 64-bit platforms, we align to word size; on 32-bit, we align to two words.
static const size_t AllocationAlignmentByteSize = 8;
STATIC_ASSERT(AllocationAlignmentByteSize == (size_t)KlassAlignmentInBytes);
static const size_t AllocationAlignmentWordSize = AllocationAlignmentByteSize / BytesPerWord;
// Returns the raw word size allocated for a given net allocation
size_t get_raw_word_size_for_requested_word_size(size_t word_size);
// Returns the raw word size allocated for a given net allocation. This only matters on 32-bit, where
// allocations have to be 64-bit aligned too and therefore must be 2-word-aligned.
inline size_t get_raw_word_size_for_requested_word_size(size_t word_size) {
LP64_ONLY(STATIC_ASSERT(AllocationAlignmentWordSize == 1)); // rewrite if this does not hold true anymore
return LP64_ONLY(word_size) // no-op on 64-bit
NOT_LP64(align_up(word_size, AllocationAlignmentWordSize));
}
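
On 64-bit the function is the identity; on 32-bit it rounds the requested word size up to the 2-word (8-byte) alignment. A standalone model with worked values (illustration only; the real function uses align_up and the LP64 macros above):

    #include <cassert>
    #include <cstddef>

    // Model: round word_size up to a multiple of alignment_words.
    size_t raw_word_size(size_t word_size, size_t alignment_words) {
      return (word_size + alignment_words - 1) / alignment_words * alignment_words;
    }

    int main() {
      assert(raw_word_size(7, 1) == 7);     // 64-bit: 1-word alignment, identity
      assert(raw_word_size(7, 2) == 8);     // 32-bit: rounds 7 up to 8
      assert(raw_word_size(10, 2) == 10);   // already aligned
      return 0;
    }
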
// Utility functions
@@ -81,8 +84,11 @@ void print_percentage(outputStream* st, size_t total, size_t part);
assert(is_aligned((value), (alignment)), \
SIZE_FORMAT_X " is not aligned to " \
SIZE_FORMAT_X, (size_t)(uintptr_t)value, (size_t)(alignment))
#define assert_is_aligned_metaspace_pointer(p) \
assert_is_aligned((p), metaspace::AllocationAlignmentByteSize);
#else
#define assert_is_aligned(value, alignment)
#define assert_is_aligned_metaspace_pointer(pointer)
#endif
// Pretty printing helpers

View File

@@ -46,7 +46,6 @@ using metaspace::MemRangeCounter;
template <class BINLISTTYPE>
struct BinListBasicTest {
static const size_t minws;
static const size_t maxws;
static void basic_test() {
@@ -57,7 +56,7 @@ struct BinListBasicTest {
MetaWord arr[1000];
size_t innocous_size = minws + ((maxws - minws) / 2);
size_t innocous_size = MAX2((size_t)1, maxws / 2);
// Try to get a block from an empty list.
size_t real_size = 4711;
@@ -88,8 +87,8 @@ struct BinListBasicTest {
MetaWord arr[1000];
for (size_t s1 = minws; s1 <= maxws; s1++) {
for (size_t s2 = minws; s2 <= maxws; s2++) {
for (size_t s1 = 1; s1 <= maxws; s1++) {
for (size_t s2 = 1; s2 <= maxws; s2++) {
bl.add_block(arr, s1);
CHECK_BL_CONTENT(bl, 1, s1);
@@ -108,7 +107,7 @@ struct BinListBasicTest {
CHECK_BL_CONTENT(bl, 1, s1);
DEBUG_ONLY(bl.verify();)
// drain bl
p = bl.remove_block(minws, &real_size);
p = bl.remove_block(1, &real_size);
EXPECT_EQ(p, arr);
EXPECT_EQ((size_t)s1, real_size);
CHECK_BL_CONTENT(bl, 0, 0);
@@ -129,7 +128,7 @@ struct BinListBasicTest {
ASSERT_EQ(cnt[1].total_size(), bl[1].total_size());
FeederBuffer fb(1024);
RandSizeGenerator rgen(minws, maxws + 1);
RandSizeGenerator rgen(1, maxws + 1);
// feed all
int which = 0;
@@ -184,10 +183,10 @@ struct BinListBasicTest {
while (bl[which].is_empty() == false) {
size_t real_size = 4711;
MetaWord* p = bl[which].remove_block(minws, &real_size);
MetaWord* p = bl[which].remove_block(1, &real_size);
ASSERT_NE(p, (MetaWord*) NULL);
ASSERT_GE(real_size, minws);
ASSERT_GE(real_size, (size_t)1);
ASSERT_TRUE(fb.is_valid_range(p, real_size));
// This must hold true since we always return the smallest fit.
@@ -205,24 +204,16 @@
}
};
template <typename BINLISTTYPE> const size_t BinListBasicTest<BINLISTTYPE>::minws = BINLISTTYPE::MinWordSize;
template <typename BINLISTTYPE> const size_t BinListBasicTest<BINLISTTYPE>::maxws = BINLISTTYPE::MaxWordSize;
TEST_VM(metaspace, BinList_basic_8) { BinListBasicTest< BinListImpl<2, 8> >::basic_test(); }
TEST_VM(metaspace, BinList_basic_16) { BinListBasicTest< BinListImpl<2, 16> >::basic_test(); }
TEST_VM(metaspace, BinList_basic_1) { BinListBasicTest< BinListImpl<1> >::basic_test(); }
TEST_VM(metaspace, BinList_basic_8) { BinListBasicTest< BinListImpl<8> >::basic_test(); }
TEST_VM(metaspace, BinList_basic_32) { BinListBasicTest<BinList32>::basic_test(); }
TEST_VM(metaspace, BinList_basic_1331) { BinListBasicTest< BinListImpl<13, 31> >::basic_test(); }
TEST_VM(metaspace, BinList_basic_131) { BinListBasicTest< BinListImpl<13, 1> >::basic_test(); }
TEST_VM(metaspace, BinList_basic2_8) { BinListBasicTest< BinListImpl<2, 8> >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic2_16) { BinListBasicTest< BinListImpl<2, 16> >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic2_32) { BinListBasicTest<BinList32 >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic2_1331) { BinListBasicTest< BinListImpl<13, 31> >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic2_131) { BinListBasicTest< BinListImpl<13, 1> >::basic_test_2(); }
TEST_VM(metaspace, BinList_random_test_8) { BinListBasicTest< BinListImpl<2, 8> >::random_test(); }
TEST_VM(metaspace, BinList_random_test_16) { BinListBasicTest< BinListImpl<2, 16> >::random_test(); }
TEST_VM(metaspace, BinList_random_test_32) { BinListBasicTest<BinList32>::random_test(); }
TEST_VM(metaspace, BinList_random_test_1331) { BinListBasicTest< BinListImpl<13, 31> >::random_test(); }
TEST_VM(metaspace, BinList_random_test_131) { BinListBasicTest< BinListImpl<13, 1> >::random_test(); }
TEST_VM(metaspace, BinList_basic_2_1) { BinListBasicTest< BinListImpl<1> >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic_2_8) { BinListBasicTest< BinListImpl<8> >::basic_test_2(); }
TEST_VM(metaspace, BinList_basic_2_32) { BinListBasicTest<BinList32>::basic_test_2(); }
TEST_VM(metaspace, BinList_basic_rand_1) { BinListBasicTest< BinListImpl<1> >::random_test(); }
TEST_VM(metaspace, BinList_basic_rand_8) { BinListBasicTest< BinListImpl<8> >::random_test(); }
TEST_VM(metaspace, BinList_basic_rand_32) { BinListBasicTest<BinList32>::random_test(); }

View File

@@ -41,6 +41,7 @@
#include "metaspaceGtestContexts.hpp"
#include "metaspaceGtestRangeHelpers.hpp"
using metaspace::AllocationAlignmentByteSize;
using metaspace::ArenaGrowthPolicy;
using metaspace::CommitLimiter;
using metaspace::InternalStats;
@@ -50,11 +51,6 @@ using metaspace::SizeAtomicCounter;
using metaspace::Settings;
using metaspace::ArenaStats;
// See metaspaceArena.cpp : needed for predicting commit sizes.
namespace metaspace {
extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size);
}
class MetaspaceArenaTestHelper {
MetaspaceGtestContext& _context;
@@ -179,7 +175,7 @@ public:
ASSERT_EQ(capacity, capacity2);
} else {
// Allocation succeeded. Should be correctly aligned.
ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
ASSERT_TRUE(is_aligned(p, AllocationAlignmentByteSize));
// used: may go up or may not (since our request may have been satisfied from the freeblocklist
// whose content already counts as used).
// committed: may go up, may not

View File

@@ -38,6 +38,7 @@
#include "metaspaceGtestContexts.hpp"
#include "metaspaceGtestSparseArray.hpp"
using metaspace::AllocationAlignmentByteSize;
using metaspace::ArenaGrowthPolicy;
using metaspace::ChunkManager;
using metaspace::IntCounter;
@@ -52,11 +53,6 @@ static bool fifty_fifty() {
return IntRange(100).random_value() < 50;
}
// See metaspaceArena.cpp : needed for predicting commit sizes.
namespace metaspace {
extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size);
}
// A MetaspaceArenaTestBed contains a single MetaspaceArena and its lock.
// It keeps track of allocations done from this MetaspaceArena.
class MetaspaceArenaTestBed : public CHeapObj<mtInternal> {
@@ -179,7 +175,8 @@ public:
size_t word_size = 1 + _allocation_range.random_value();
MetaWord* p = _arena->allocate(word_size);
if (p != NULL) {
EXPECT_TRUE(is_aligned(p, sizeof(MetaWord)));
EXPECT_TRUE(is_aligned(p, AllocationAlignmentByteSize));
allocation_t* a = NEW_C_HEAP_OBJ(allocation_t, mtInternal);
a->word_size = word_size;
a->p = p;

View File

@@ -384,8 +384,6 @@ tier1_runtime = \
-runtime/memory/ReserveMemory.java \
-runtime/Metaspace/FragmentMetaspace.java \
-runtime/Metaspace/FragmentMetaspaceSimple.java \
-runtime/Metaspace/elastic/TestMetaspaceAllocationMT1.java \
-runtime/Metaspace/elastic/TestMetaspaceAllocationMT2.java \
-runtime/MirrorFrame/Test8003720.java \
-runtime/modules/LoadUnloadModuleStress.java \
-runtime/modules/ModuleStress/ExportModuleStressTest.java \