8186089: Move Arena to its own header file
Move classes Chunk and Arena to new arena.hpp and arena.cpp files.

Reviewed-by: coleenp, gtriantafill

This commit is contained in:
parent a6dcc4531f
commit d69af7b386
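For context, Arena is HotSpot's bump-pointer allocator: memory is carved out of a linked list of Chunks and reclaimed all at once when the arena is destructed. After this commit, call sites include memory/arena.hpp instead of getting the classes via memory/allocation.hpp. A minimal usage sketch (a hypothetical call site, not part of this commit):

    #include "memory/arena.hpp"   // new home of Chunk and Arena

    void arena_sketch() {
      Arena arena(mtInternal);                     // memory type tag for native memory tracking
      int*  a = NEW_ARENA_ARRAY(&arena, int, 16);  // (int*) arena.Amalloc(16 * sizeof(int))
      char* b = (char*) arena.Amalloc(100);        // request is rounded up by ARENA_ALIGN
      // no per-object frees needed: ~Arena() chops the whole chunk list at once
    }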
@@ -26,6 +26,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -65,6 +66,18 @@ void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

void* ResourceObj::operator new(size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* ResourceObj::operator new [](size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res = NULL;
  switch (type) {
@@ -210,456 +223,6 @@ void trace_heap_free(void* p) {
  tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool has the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while(cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to preallocated pool's
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}
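Each pool hands out chunks of exactly one size, namely the chunk payload plus the aligned Chunk header, which is why ChunkPool::allocate() can assert bytes == _size. The sizes the four pools are built with (restating initialize() above, for illustration only):

    // aligned_overhead_size() == ARENA_ALIGN(sizeof(Chunk)): the header rounded
    // up to arena alignment, so the payload that follows it stays aligned.
    size_t large_bytes  = Chunk::size        + Chunk::aligned_overhead_size(); // ~32K payload
    size_t medium_bytes = Chunk::medium_size + Chunk::aligned_overhead_size(); // ~10K payload
    size_t small_bytes  = Chunk::init_size   + Chunk::aligned_overhead_size(); //  ~1K payload
    size_t tiny_bytes   = Chunk::tiny_size   + Chunk::aligned_overhead_size(); // ~256B payload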

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 }; // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chuck pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // workaround rare racing condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arenas contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare racing condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is high traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) { // Whilst have Chunks in a row
    sum += k->length(); // Total size of this Chunk
    k = k->next();      // Bump along to next Chunk
  }
  return sum;           // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
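grow() is the slow path behind the Amalloc family declared in the header: the common case is one bounds check plus a pointer bump, and only an exhausted chunk reaches the code above. A schematic of that fast path (illustrative only; the real logic lives in Arena::Amalloc):

    // Bump-pointer allocation within the current chunk; x is already ARENA_ALIGNed.
    char* bump_sketch(char*& hwm, char* max, size_t x) {
      if (hwm + x > max) return NULL; // caller would take the grow() slow path
      char* old = hwm;                // hand out the current high-water mark
      hwm += x;                       // and bump it past the new allocation
      return old;
    }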

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

@@ -675,10 +238,6 @@ void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs = os::num_mallocs;
  start_frees = os::num_frees;
@@ -698,38 +257,6 @@ void AllocStats::print() {
  num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last junk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
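The hunk at the top of this file adds out-of-line bodies for ResourceObj's arena placement-new operators; the matching header hunk below replaces their inline bodies with plain declarations. Usage at call sites is unchanged. A hypothetical example (sketch only, not part of the commit):

    // Hypothetical ResourceObj subclass placed into an arena.
    class MyNode : public ResourceObj {
     public:
      int _value;
      MyNode(int v) : _value(v) {}
    };

    MyNode* make_node(Arena* arena, int v) {
      return new (arena) MyNode(v);  // dispatches to operator new(size_t, Arena*)
    }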

@@ -28,24 +28,9 @@
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)

class AllocFailStrategy {
 public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
@@ -307,6 +292,8 @@ class MetaspaceObj {

// Base class for classes that constitute name spaces.

class Arena;

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
@@ -314,219 +301,6 @@ class AllStatic {
};

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
 }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         {  return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

 private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)

//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
@@ -574,17 +348,9 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }
  void* operator new(size_t size, Arena *arena) throw();

  void* operator new [](size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }
  void* operator new [](size_t size, Arena *arena) throw();

  void* operator new(size_t size) throw() {
    address res = (address)resource_allocate_bytes(size);
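Note the pattern in the hunk above: the inline bodies of operator new(size_t, Arena*) and operator new[](size_t, Arena*) become plain declarations, their definitions having moved to the .cpp file in the first hunk of this commit. That is what allows this header to drop Arena's full definition and get by with the forward declaration `class Arena;` added earlier. Schematically:

    // Before: inline body in the header, requiring Arena's full definition.
    //   void* operator new(size_t size, Arena *arena) throw() {
    //     address res = (address)arena->Amalloc(size);
    //     ...
    //   }
    // After: declaration only; a forward-declared Arena suffices here.
    //   void* operator new(size_t size, Arena *arena) throw();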

hotspot/src/share/vm/memory/arena.cpp (new file, 526 lines)
@@ -0,0 +1,526 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool has the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while(cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to preallocated pool's
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 }; // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // expect requested_size but if sizeof(Chunk) doesn't match isn't proper size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chuck pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // workaround rare racing condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arenas contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare racing condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is high traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) { // Whilst have Chunks in a row
    sum += k->length(); // Total size of this Chunk
    k = k->next();      // Bump along to next Chunk
  }
  return sum;           // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last junk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

#endif // Non-product
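Arealloc, defined above, grows a block in place only when it is the most recent allocation and the enlarged size still fits in the current chunk; otherwise it allocates fresh storage and copies. A hypothetical call sequence (sketch only):

    void arealloc_sketch(Arena* a) {
      char* p = (char*) a->Amalloc(64);
      p = (char*) a->Arealloc(p, 64, 128);   // newest allocation: usually just moves _hwm
      (void) a->Amalloc(16);                 // something else now sits at the high-water mark
      p = (char*) a->Arealloc(p, 128, 256);  // forced onto the relocate-and-memcpy path
    }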

hotspot/src/share/vm/memory/arena.hpp (new file, 253 lines)
@@ -0,0 +1,253 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_ARENA_HPP
#define SHARE_VM_ARENA_HPP

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"

#include <new>

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {

 private:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
 }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         {  return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

 private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)

#endif // SHARE_VM_ARENA_HPP
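The macros at the bottom of the new header are the conventional way client code sizes arena requests in element counts rather than bytes. A hypothetical example (sketch only; note the macros carve out raw storage, so no constructors run):

    struct Point { int x, y; };  // hypothetical POD payload

    void macro_sketch(Arena* a) {
      int* v = NEW_ARENA_ARRAY(a, int, 8);        // (int*) a->Amalloc(8 * sizeof(int))
      v = REALLOC_ARENA_ARRAY(a, int, v, 8, 32);  // grow from 8 to 32 elements
      FREE_ARENA_ARRAY(a, int, v, 32);            // reclaims storage only if v is newest
      Point* p = NEW_ARENA_OBJ(a, Point);         // one object's worth of raw bytes
    }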

@@ -131,6 +131,7 @@
# include "logging/log.hpp"
# include "memory/allocation.hpp"
# include "memory/allocation.inline.hpp"
# include "memory/arena.hpp"
# include "memory/heap.hpp"
# include "memory/iterator.hpp"
# include "memory/memRegion.hpp"

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,13 @@
#if INCLUDE_ALL_GCS
#include "gc/g1/g1_globals.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

class CommandLineFlagConstraint_bool : public CommandLineFlagConstraint {
  CommandLineFlagConstraintFunc_bool _constraint;

@@ -25,6 +25,7 @@
#ifndef SHARE_VM_RUNTIME_HANDLES_HPP
#define SHARE_VM_RUNTIME_HANDLES_HPP

#include "memory/arena.hpp"
#include "oops/oop.hpp"
#include "oops/oopsHierarchy.hpp"

@@ -971,18 +971,6 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
  unchecked_c1_static_field(Runtime1, _blobs, sizeof(Runtime1::_blobs)) /* NOTE: no type */ \
                                              \
  /**************/                            \
  /* allocation */                            \
  /**************/                            \
                                              \
  nonstatic_field(Chunk, _next, Chunk*)       \
  nonstatic_field(Chunk, _len, const size_t)  \
                                              \
  nonstatic_field(Arena, _first, Chunk*)      \
  nonstatic_field(Arena, _chunk, Chunk*)      \
  nonstatic_field(Arena, _hwm, char*)         \
  nonstatic_field(Arena, _max, char*)         \
                                              \
  /************/                              \
  /* CI */                                    \
  /************/                              \
                                              \
@@ -1560,7 +1548,6 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
  declare_toplevel_type(GrowableArray<int>)     \
  declare_toplevel_type(Arena)                  \
  declare_type(ResourceArea, Arena)             \
  declare_toplevel_type(Chunk)                  \
                                                \
  declare_toplevel_type(SymbolCompactHashTable) \
                                                \