8311648: Refactor the Arena/Chunk/ChunkPool interface

Reviewed-by: stuefe, coleenp
Johan Sjölen 2023-08-11 09:32:45 +00:00
parent 43462a36ab
commit 62adeb08c3
5 changed files with 102 additions and 100 deletions
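
In short, this change moves chunk allocation and deallocation out of Chunk's operator new/delete and into ChunkPool, turns Chunk::chop and Chunk::next_chop into static helpers that take the chunk explicitly, and moves the pool-cleaner startup from Chunk to Arena. A rough sketch of the new call shapes as they appear in the hunks below; illustrative only (ChunkPool remains local to arena.cpp, and len, c and head are placeholders):

    // Allocation and deallocation now go through ChunkPool (was: Chunk's operator new/delete).
    Chunk* c = ChunkPool::allocate_chunk(len, AllocFailStrategy::EXIT_OOM);
    ChunkPool::deallocate_chunk(c);           // standard sizes return to a pool, others are os::free()d
    // Releasing a whole chain is a static call taking the head (was: head->chop()).
    Chunk::chop(head);
    // The periodic pool cleaner is started via Arena (was: Chunk::start_chunk_pool_cleaner_task()).
    Arena::start_chunk_pool_cleaner_task();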

@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
@@ -43,24 +44,18 @@ STATIC_ASSERT(is_aligned((int)Chunk::medium_size, ARENA_AMALLOC_ALIGNMENT));
STATIC_ASSERT(is_aligned((int)Chunk::size, ARENA_AMALLOC_ALIGNMENT));
STATIC_ASSERT(is_aligned((int)Chunk::non_pool_size, ARENA_AMALLOC_ALIGNMENT));
//--------------------------------------------------------------------------------------
// ChunkPool implementation
// MT-safe pool of same-sized chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
Chunk* _first; // first cached Chunk; its first word points to next chunk
const size_t _size; // (inner payload) size of the chunks this pool serves
// Our four static pools
static const int _num_pools = 4;
static constexpr int _num_pools = 4;
static ChunkPool _pools[_num_pools];
public:
ChunkPool(size_t size) : _first(nullptr), _size(size) {}
Chunk* _first;
const size_t _size; // (inner payload) size of the chunks this pool serves
// Allocate a chunk from the pool; returns null if pool is empty.
Chunk* allocate() {
// Returns null if pool is empty.
Chunk* take_from_pool() {
ThreadCritical tc;
Chunk* c = _first;
if (_first != nullptr) {
@@ -68,16 +63,14 @@ class ChunkPool {
}
return c;
}
// Return a chunk to the pool
void free(Chunk* chunk) {
void return_to_pool(Chunk* chunk) {
assert(chunk->length() == _size, "wrong pool for this chunk");
ThreadCritical tc;
chunk->set_next(_first);
_first = chunk;
}
// Prune the pool
// Clear this pool of all contained chunks
void prune() {
// Free all chunks while in ThreadCritical lock
// so NMT adjustment is stable.
@@ -92,13 +85,6 @@ class ChunkPool {
_first = nullptr;
}
static void clean() {
NativeHeapTrimmer::SuspendMark sm("chunk pool cleaner");
for (int i = 0; i < _num_pools; i++) {
_pools[i].prune();
}
}
// Given a (inner payload) size, return the pool responsible for it, or null if the size is non-standard
static ChunkPool* get_pool_for_size(size_t size) {
for (int i = 0; i < _num_pools; i++) {
@@ -109,28 +95,22 @@ class ChunkPool {
return nullptr;
}
public:
ChunkPool(size_t size) : _first(nullptr), _size(size) {}
static void clean() {
NativeHeapTrimmer::SuspendMark sm("chunk pool cleaner");
for (int i = 0; i < _num_pools; i++) {
_pools[i].prune();
}
}
// Returns an initialized and null-terminated Chunk of requested size
static Chunk* allocate_chunk(size_t length, AllocFailType alloc_failmode);
static void deallocate_chunk(Chunk* p);
};
ChunkPool ChunkPool::_pools[] = { Chunk::size, Chunk::medium_size, Chunk::init_size, Chunk::tiny_size };
//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//
class ChunkPoolCleaner : public PeriodicTask {
static const int cleaning_interval = 5000; // cleaning interval in ms
public:
ChunkPoolCleaner() : PeriodicTask(cleaning_interval) {}
void task() {
ChunkPool::clean();
}
};
//--------------------------------------------------------------------------------------
// Chunk implementation
void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, size_t length) throw() {
Chunk* ChunkPool::allocate_chunk(size_t length, AllocFailType alloc_failmode) {
// - requested_size = sizeof(Chunk)
// - length = payload size
// We must ensure that the boundaries of the payload (C and D) are aligned to 64-bit:
@@ -149,62 +129,57 @@ void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, siz
// - the payload size (length) must be aligned to 64-bit, which takes care of 64-bit
// aligning (D)
assert(sizeofChunk == sizeof(Chunk), "weird request size");
assert(is_aligned(length, ARENA_AMALLOC_ALIGNMENT), "chunk payload length misaligned: "
SIZE_FORMAT ".", length);
// Try to reuse a freed chunk from the pool
ChunkPool* pool = ChunkPool::get_pool_for_size(length);
Chunk* chunk = nullptr;
if (pool != nullptr) {
Chunk* c = pool->allocate();
Chunk* c = pool->take_from_pool();
if (c != nullptr) {
assert(c->length() == length, "wrong length?");
return c;
chunk = c;
}
}
// Either the pool was empty, or this is a non-standard length. Allocate a new Chunk from C-heap.
size_t bytes = ARENA_ALIGN(sizeofChunk) + length;
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
if (chunk == nullptr) {
// Either the pool was empty, or this is a non-standard length. Allocate a new Chunk from C-heap.
size_t bytes = ARENA_ALIGN(sizeof(Chunk)) + length;
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
}
chunk = (Chunk*)p;
}
::new(chunk) Chunk(length);
// We rely on arena alignment <= malloc alignment.
assert(is_aligned(p, ARENA_AMALLOC_ALIGNMENT), "Chunk start address misaligned.");
return p;
assert(is_aligned(chunk, ARENA_AMALLOC_ALIGNMENT), "Chunk start address misaligned.");
return chunk;
}
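
For concreteness, the size computation above works out as follows under an assumed 64-bit layout, where sizeof(Chunk) is 16 bytes (one Chunk* plus one size_t member) and ARENA_AMALLOC_ALIGNMENT is BytesPerLong, i.e. 8. This is an illustrative sketch of the arithmetic, not part of the change:

    // Values assume a 64-bit build; the alignment guarantees are what the asserts above check.
    size_t header  = ARENA_ALIGN(sizeof(Chunk)); // align_up(16, 8) == 16, so the payload (bottom()) starts 64-bit aligned
    size_t payload = Chunk::size;                // a standard pool size, statically asserted 64-bit aligned earlier in this file
    size_t bytes   = header + payload;           // total handed to os::malloc; the payload end (top()) stays aligned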
void Chunk::operator delete(void* p) {
void ChunkPool::deallocate_chunk(Chunk* c) {
// If this is a standard-sized chunk, return it to its pool; otherwise free it.
Chunk* c = (Chunk*)p;
ChunkPool* pool = ChunkPool::get_pool_for_size(c->length());
if (pool != nullptr) {
pool->free(c);
pool->return_to_pool(c);
} else {
ThreadCritical tc; // Free chunks under TC lock so that NMT adjustment is stable.
os::free(c);
}
}
Chunk::Chunk(size_t length) : _len(length) {
_next = nullptr; // Chain on the linked list
}
ChunkPool ChunkPool::_pools[] = { Chunk::size, Chunk::medium_size, Chunk::init_size, Chunk::tiny_size };
void Chunk::chop() {
Chunk *k = this;
while( k ) {
Chunk *tmp = k->next();
// clear out this chunk (to detect allocation bugs)
if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
delete k; // Free chunk (was malloc'd)
k = tmp;
}
}
class ChunkPoolCleaner : public PeriodicTask {
static const int cleaning_interval = 5000; // cleaning interval in ms
void Chunk::next_chop() {
_next->chop();
_next = nullptr;
}
public:
ChunkPoolCleaner() : PeriodicTask(cleaning_interval) {}
void task() {
ChunkPool::clean();
}
};
void Chunk::start_chunk_pool_cleaner_task() {
void Arena::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
static bool task_created = false;
assert(!task_created, "should not start chuck pool cleaner twice");
@@ -214,11 +189,30 @@ void Chunk::start_chunk_pool_cleaner_task() {
cleaner->enroll();
}
//------------------------------Arena------------------------------------------
Chunk::Chunk(size_t length) : _len(length) {
_next = nullptr; // Chain on the linked list
}
void Chunk::chop(Chunk* k) {
while (k != nullptr) {
Chunk* tmp = k->next();
// clear out this chunk (to detect allocation bugs)
if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
ChunkPool::deallocate_chunk(k);
k = tmp;
}
}
void Chunk::next_chop(Chunk* k) {
assert(k != nullptr && k->_next != nullptr, "must be non-null");
Chunk::chop(k->_next);
k->_next = nullptr;
}
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
init_size = ARENA_ALIGN(init_size);
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_chunk = ChunkPool::allocate_chunk(init_size, AllocFailStrategy::EXIT_OOM);
_first = _chunk;
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
MemTracker::record_new_arena(flag);
@@ -226,7 +220,8 @@ Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0)
}
Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_chunk = ChunkPool::allocate_chunk(Chunk::init_size, AllocFailStrategy::EXIT_OOM);
_first = _chunk;
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
MemTracker::record_new_arena(flag);
@@ -244,7 +239,7 @@ void Arena::destruct_contents() {
// that can have total arena memory exceed total chunk memory
set_size_in_bytes(0);
if (_first != nullptr) {
_first->chop();
Chunk::chop(_first);
}
reset();
}
@@ -262,7 +257,7 @@ void Arena::set_size_in_bytes(size_t size) {
// Total of all Chunks in arena
size_t Arena::used() const {
size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
Chunk *k = _first;
Chunk* k = _first;
while( k != _chunk) { // Whilst have Chunks in a row
sum += k->length(); // Total size of this Chunk
k = k->next(); // Bump along to next Chunk
@@ -280,15 +275,19 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
return nullptr;
}
Chunk *k = _chunk; // Get filled-up chunk address
_chunk = new (alloc_failmode, len) Chunk(len);
Chunk* k = _chunk; // Get filled-up chunk address
_chunk = ChunkPool::allocate_chunk(len, alloc_failmode);
if (_chunk == nullptr) {
_chunk = k; // restore the previous value of _chunk
return nullptr;
}
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
else _first = _chunk;
if (k != nullptr) {
k->set_next(_chunk); // Append new chunk to end of linked list
} else {
_first = _chunk;
}
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
set_size_in_bytes(size_in_bytes() + len);
@@ -343,7 +342,7 @@ bool Arena::contains( const void *ptr ) const {
if (_chunk == nullptr) return false;
if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
return true; // Check for in this chunk
for (Chunk *c = _first; c; c = c->next()) {
for (Chunk* c = _first; c; c = c->next()) {
if (c == _chunk) continue; // current chunk has been processed
if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
return true; // Check for every chunk in Arena

@@ -37,16 +37,19 @@
#define ARENA_AMALLOC_ALIGNMENT BytesPerLong
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))
//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
class Chunk {
private:
Chunk* _next; // Next Chunk in list
const size_t _len; // Size of this Chunk
public:
void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
void operator delete(void* p);
public:
NONCOPYABLE(Chunk);
void operator delete(void*) = delete;
void* operator new(size_t) = delete;
Chunk(size_t length);
enum {
@@ -67,8 +70,8 @@ class Chunk: CHeapObj<mtChunk> {
non_pool_size = init_size + 32 // An initial size which is not one of above
};
void chop(); // Chop this chunk
void next_chop(); // Chop next chunk
static void chop(Chunk* chunk); // Chop this chunk
static void next_chop(Chunk* chunk); // Chop next chunk
static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }
@@ -79,12 +82,8 @@ class Chunk: CHeapObj<mtChunk> {
char* bottom() const { return ((char*) this) + aligned_overhead_size(); }
char* top() const { return bottom() + _len; }
bool contains(char* p) const { return bottom() <= p && p <= top(); }
// Start the chunk_pool cleaner task
static void start_chunk_pool_cleaner_task();
};
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObjBase {
protected:
@@ -94,9 +93,10 @@ protected:
MEMFLAGS _flags; // Memory tracking flags
Chunk *_first; // First chunk
Chunk *_chunk; // current chunk
char *_hwm, *_max; // High water mark and max in current chunk
Chunk* _first; // First chunk
Chunk* _chunk; // current chunk
char* _hwm; // High water mark
char* _max; // and max in current chunk
// Get a new Chunk of at least size x
void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
size_t _size_in_bytes; // Size of arena (used for native memory tracking)
@@ -113,6 +113,9 @@ protected:
}
public:
// Start the chunk_pool cleaner task
static void start_chunk_pool_cleaner_task();
Arena(MEMFLAGS memflag);
Arena(MEMFLAGS memflag, size_t init_size);
~Arena();
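
A brief usage-level view of the header changes above: Chunk can no longer be allocated, freed or copied directly (operator new and delete are deleted, and the class is NONCOPYABLE); chains are released through the static chop helpers, and the cleaner task now hangs off Arena. Sketch only; 'head' is a placeholder for a chain owned by an Arena:

    // Each of these would now fail to compile:
    //   Chunk* c = new Chunk(Chunk::init_size);   // operator new is deleted
    //   delete c;                                  // operator delete is deleted
    //   Chunk copy(*c);                            // NONCOPYABLE
    Chunk::chop(head);                        // frees or pools every chunk from 'head' on
    Arena::start_chunk_pool_cleaner_task();   // declared on Arena now; called once during VM startup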

@@ -112,7 +112,7 @@ public:
"size: " SIZE_FORMAT ", saved size: " SIZE_FORMAT,
size_in_bytes(), state._size_in_bytes);
set_size_in_bytes(state._size_in_bytes);
state._chunk->next_chop();
Chunk::next_chop(state._chunk);
assert(_hwm != state._hwm, "Sanity check: HWM moves when we have later chunks");
} else {
assert(size_in_bytes() == state._size_in_bytes, "Sanity check");

@@ -162,7 +162,7 @@ void HandleMark::chop_later_chunks() {
// reset arena size before delete chunks. Otherwise, the total
// arena size could exceed total chunk size
_area->set_size_in_bytes(size_in_bytes());
_chunk->next_chop();
Chunk::next_chop(_chunk);
}
void* HandleMark::operator new(size_t size) throw() {

@@ -680,7 +680,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
JvmtiAgentList::load_xrun_agents();
}
Chunk::start_chunk_pool_cleaner_task();
Arena::start_chunk_pool_cleaner_task();
// Start the service thread
// The service thread enqueues JVMTI deferred events and does various hashtable