8301070: Replace NULL with nullptr in share/memory/

Reviewed-by: stefank, stuefe
Author: Johan Sjölen, 2023-01-26 16:30:31 +00:00
parent 315398c245
commit d98a323a8b
65 changed files with 678 additions and 678 deletions

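The change throughout this commit is mechanical: every use of the NULL macro under share/memory/ becomes the C++11 nullptr keyword, and comments that said "NULL" now say "null". As a reminder of why nullptr is the safer spelling (a minimal standalone sketch, not taken from the patch): nullptr has its own type, std::nullptr_t, so it can never be mistaken for an integer during overload resolution, whereas NULL is an integer-like constant.

    #include <cstddef>

    static void f(int)   {}   // (1) integer overload
    static void f(char*) {}   // (2) pointer overload

    int main() {
      // f(NULL);     // would pick (1) or be ambiguous, because NULL expands
      //              // to an integer-like constant (0, 0L, ...) on many platforms
      f(nullptr);     // always picks (2): nullptr has type std::nullptr_t
      return 0;
    }
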
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ char* AllocateHeap(size_t size,
const NativeCallStack& stack,
AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
char* p = (char*) os::malloc(size, flags, stack);
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
}
return p;
@ -57,19 +57,19 @@ char* ReallocateHeap(char *old,
MEMFLAGS flag,
AllocFailType alloc_failmode) {
char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
}
return p;
}
// handles NULL pointers
// handles null pointers
void FreeHeap(void* p) {
os::free(p);
}
void* MetaspaceObj::_shared_metaspace_base = NULL;
void* MetaspaceObj::_shared_metaspace_top = NULL;
void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top = nullptr;
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
@ -132,7 +132,7 @@ void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
MEMFLAGS flags) throw() {
// should only call this with std::nothrow, use other operator new() otherwise
address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res!= NULL) set_allocation_type(res, C_HEAP);)
DEBUG_ONLY(if (res!= nullptr) set_allocation_type(res, C_HEAP);)
return res;
}

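The AllocateHeap/ReallocateHeap hunks above show the two failure modes used throughout the VM: with the default AllocFailStrategy::EXIT_OOM an allocation failure terminates the VM, while AllocFailStrategy::RETURN_NULL hands the failure back to the caller, which must now compare against nullptr. A hypothetical caller sketch (buffer size and its use are invented for illustration):

    // Hypothetical graceful handling of C-heap exhaustion.
    char* buf = AllocateHeap(1024, mtInternal, CURRENT_PC,
                             AllocFailStrategy::RETURN_NULL);
    if (buf == nullptr) {
      // Allocation failed; report and degrade instead of exiting the VM.
    } else {
      // ... use buf ...
      FreeHeap(buf);   // FreeHeap also handles nullptr, per the comment above
    }
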
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -174,7 +174,7 @@ char* ReallocateHeap(char *old,
MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// handles NULL pointers
// handles null pointers
void FreeHeap(void* p);
class CHeapObjBase {
@ -321,7 +321,7 @@ class MetaspaceObj {
// into a single contiguous memory block, so we can use these
// two pointers to quickly determine if something is in the
// shared metaspace.
// When CDS is not enabled, both pointers are set to NULL.
// When CDS is not enabled, both pointers are set to null.
static void* _shared_metaspace_base; // (inclusive) low address
static void* _shared_metaspace_top; // (exclusive) high address
@ -335,7 +335,7 @@ class MetaspaceObj {
#if INCLUDE_CDS
static bool is_shared(const MetaspaceObj* p) {
// If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will
// both be NULL and all values of p will be rejected quickly.
// both be null and all values of p will be rejected quickly.
return (((void*)p) < _shared_metaspace_top &&
((void*)p) >= _shared_metaspace_base);
}
@ -386,7 +386,7 @@ class MetaspaceObj {
METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
@ -511,7 +511,7 @@ protected:
}
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, RESOURCE_AREA);)
return res;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,15 +59,15 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
size_t size = size_for(length);
char* addr = os::reserve_memory(size, !ExecMem, flags);
if (addr == NULL) {
return NULL;
if (addr == nullptr) {
return nullptr;
}
if (os::commit_memory(addr, size, !ExecMem)) {
return (E*)addr;
} else {
os::release_memory(addr, size);
return NULL;
return nullptr;
}
}
@ -76,7 +76,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
size_t size = size_for(length);
char* addr = os::reserve_memory(size, !ExecMem, flags);
if (addr == NULL) {
if (addr == nullptr) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
@ -148,13 +148,13 @@ E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_leng
E* new_addr = (new_length > 0)
? allocate(new_length, flags)
: NULL;
: nullptr;
if (new_addr != NULL && old_addr != NULL) {
if (new_addr != nullptr && old_addr != nullptr) {
memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
}
if (old_addr != NULL) {
if (old_addr != nullptr) {
free(old_addr, old_length);
}
@ -173,7 +173,7 @@ void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
template <class E>
void ArrayAllocator<E>::free(E* addr, size_t length) {
if (addr != NULL) {
if (addr != nullptr) {
if (should_use_malloc(length)) {
free_malloc(addr, length);
} else {

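The MmapArrayAllocator hunks above distinguish allocate(), which exits the VM on failure, from allocate_or_null(), which reports failure by returning nullptr. A hypothetical caller sketch, assuming the usual static interface (element type, length, and flags are invented for illustration):

    // Hypothetical fallible allocation of an mmap-backed array.
    int* backing = MmapArrayAllocator<int>::allocate_or_null(1000, mtInternal);
    if (backing == nullptr) {
      // reserve or commit failed; fall back to a smaller array or report
    } else {
      // ... use backing[0..999] ...
      MmapArrayAllocator<int>::free(backing, 1000);
    }
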
@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,9 +56,9 @@ class ChunkPool {
static ChunkPool _pools[_num_pools];
public:
ChunkPool(size_t size) : _first(NULL), _num_chunks(0), _size(size) {}
ChunkPool(size_t size) : _first(nullptr), _num_chunks(0), _size(size) {}
// Allocate a chunk from the pool; returns NULL if pool is empty.
// Allocate a chunk from the pool; returns null if pool is empty.
Chunk* allocate() {
ThreadCritical tc;
Chunk* c = _first;
@ -81,7 +81,7 @@ class ChunkPool {
// Prune the pool
void prune() {
static const int blocksToKeep = 5;
Chunk* cur = NULL;
Chunk* cur = nullptr;
Chunk* next;
// if we have more than n chunks, free all of them
ThreadCritical tc;
@ -89,18 +89,18 @@ class ChunkPool {
// free chunks at end of queue, for better locality
cur = _first;
for (size_t i = 0; i < (blocksToKeep - 1); i++) {
assert(cur != NULL, "counter out of sync?");
assert(cur != nullptr, "counter out of sync?");
cur = cur->next();
}
assert(cur != NULL, "counter out of sync?");
assert(cur != nullptr, "counter out of sync?");
next = cur->next();
cur->set_next(NULL);
cur->set_next(nullptr);
cur = next;
// Free all remaining chunks while in ThreadCritical lock
// so NMT adjustment is stable.
while(cur != NULL) {
while(cur != nullptr) {
next = cur->next();
os::free(cur);
_num_chunks--;
@ -115,14 +115,14 @@ class ChunkPool {
}
}
// Given a (inner payload) size, return the pool responsible for it, or NULL if the size is non-standard
// Given a (inner payload) size, return the pool responsible for it, or null if the size is non-standard
static ChunkPool* get_pool_for_size(size_t size) {
for (int i = 0; i < _num_pools; i++) {
if (_pools[i]._size == size) {
return _pools + i;
}
}
return NULL;
return nullptr;
}
};
@ -170,9 +170,9 @@ void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, siz
SIZE_FORMAT ".", length);
// Try to reuse a freed chunk from the pool
ChunkPool* pool = ChunkPool::get_pool_for_size(length);
if (pool != NULL) {
if (pool != nullptr) {
Chunk* c = pool->allocate();
if (c != NULL) {
if (c != nullptr) {
assert(c->length() == length, "wrong length?");
return c;
}
@ -180,7 +180,7 @@ void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, siz
// Either the pool was empty, or this is a non-standard length. Allocate a new Chunk from C-heap.
size_t bytes = ARENA_ALIGN(sizeofChunk) + length;
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
}
// We rely on arena alignment <= malloc alignment.
@ -192,7 +192,7 @@ void Chunk::operator delete(void* p) {
// If this is a standard-sized chunk, return it to its pool; otherwise free it.
Chunk* c = (Chunk*)p;
ChunkPool* pool = ChunkPool::get_pool_for_size(c->length());
if (pool != NULL) {
if (pool != nullptr) {
pool->free(c);
} else {
ThreadCritical tc; // Free chunks under TC lock so that NMT adjustment is stable.
@ -201,7 +201,7 @@ void Chunk::operator delete(void* p) {
}
Chunk::Chunk(size_t length) : _len(length) {
_next = NULL; // Chain on the linked list
_next = nullptr; // Chain on the linked list
}
void Chunk::chop() {
@ -217,7 +217,7 @@ void Chunk::chop() {
void Chunk::next_chop() {
_next->chop();
_next = NULL;
_next = nullptr;
}
void Chunk::start_chunk_pool_cleaner_task() {
@ -276,7 +276,7 @@ void Arena::destruct_contents() {
// reset size before chop to avoid a rare racing condition
// that can have total arena memory exceed total chunk memory
set_size_in_bytes(0);
if (_first != NULL) {
if (_first != nullptr) {
_first->chop();
}
reset();
@ -312,9 +312,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
Chunk *k = _chunk; // Get filled-up chunk address
_chunk = new (alloc_failmode, len) Chunk(len);
if (_chunk == NULL) {
if (_chunk == nullptr) {
_chunk = k; // restore the previous value of _chunk
return NULL;
return nullptr;
}
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
else _first = _chunk;
@ -332,11 +332,11 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
if (new_size == 0) {
Afree(old_ptr, old_size); // like realloc(3)
return NULL;
return nullptr;
}
if (old_ptr == NULL) {
if (old_ptr == nullptr) {
assert(old_size == 0, "sanity");
return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
return Amalloc(new_size, alloc_failmode); // as with realloc(3), a null old ptr is equivalent to malloc(3)
}
char *c_old = (char*)old_ptr; // Handy name
// Stupid fast special case
@ -358,8 +358,8 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
// Oops, got to relocate guts
void *new_ptr = Amalloc(new_size, alloc_failmode);
if (new_ptr == NULL) {
return NULL;
if (new_ptr == nullptr) {
return nullptr;
}
memcpy( new_ptr, c_old, old_size );
Afree(c_old,old_size); // Mostly done to keep stats accurate
@ -369,7 +369,7 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
if (_chunk == NULL) return false;
if (_chunk == nullptr) return false;
if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
return true; // Check for in this chunk
for (Chunk *c = _first; c; c = c->next()) {

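The Arealloc/Afree hunks above follow the realloc(3)/free(3) conventions spelled out in their comments: a null old pointer makes Arealloc behave like Amalloc, a new size of zero frees and returns nullptr, and Afree on a null pointer is a no-op. A hypothetical sketch, assuming an already constructed Arena* named arena:

    // Hypothetical walk through the realloc-like semantics shown above.
    void* p = arena->Amalloc(64);
    p = arena->Arealloc(p, 64, 128);            // grow; data is copied if the block moves
    void* q = arena->Arealloc(nullptr, 0, 32);  // null old pointer acts like Amalloc(32)
    q = arena->Arealloc(q, 32, 0);              // new size 0 frees and yields nullptr
    arena->Afree(p, 128);                       // reclaims the space when possible
    arena->Afree(nullptr, 0);                   // explicitly a no-op
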
@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,8 +140,8 @@ protected:
// Fast delete in area. Common case is: NOP (except for storage reclaimed)
bool Afree(void *ptr, size_t size) {
if (ptr == NULL) {
return true; // as with free(3), freeing NULL is a noop.
if (ptr == nullptr) {
return true; // as with free(3), freeing null is a noop.
}
#ifdef ASSERT
if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
@ -174,8 +174,8 @@ protected:
private:
// Reset this Arena to empty, access will trigger grow if necessary
void reset(void) {
_first = _chunk = NULL;
_hwm = _max = NULL;
_first = _chunk = nullptr;
_hwm = _max = nullptr;
set_size_in_bytes(0);
}
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -50,8 +50,8 @@ using metaspace::InternalStats;
ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type) :
_lock(lock),
_space_type(space_type),
_non_class_space_arena(NULL),
_class_space_arena(NULL)
_non_class_space_arena(nullptr),
_class_space_arena(nullptr)
{
ChunkManager* const non_class_cm =
ChunkManager::chunkmanager_nonclass();
@ -98,7 +98,7 @@ MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataTy
}
// Attempt to expand the GC threshold to be good for at least another word_size words
// and allocate. Returns NULL if failure. Used during Metaspace GC.
// and allocate. Returns null if failure. Used during Metaspace GC.
MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdType) {
size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
assert(delta_bytes > 0, "Must be");
@ -115,7 +115,7 @@ MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace:
do {
incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
res = allocate(word_size, mdType);
} while (!incremented && res == NULL && can_retry);
} while (!incremented && res == nullptr && can_retry);
if (incremented) {
Metaspace::tracer()->report_gc_threshold(before, after,
@ -141,20 +141,20 @@ void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_c
// Update statistics. This walks all in-use chunks.
void ClassLoaderMetaspace::add_to_statistics(metaspace::ClmsStats* out) const {
if (non_class_space_arena() != NULL) {
if (non_class_space_arena() != nullptr) {
non_class_space_arena()->add_to_statistics(&out->_arena_stats_nonclass);
}
if (class_space_arena() != NULL) {
if (class_space_arena() != nullptr) {
class_space_arena()->add_to_statistics(&out->_arena_stats_class);
}
}
#ifdef ASSERT
void ClassLoaderMetaspace::verify() const {
if (non_class_space_arena() != NULL) {
if (non_class_space_arena() != nullptr) {
non_class_space_arena()->verify();
}
if (class_space_arena() != NULL) {
if (class_space_arena() != nullptr) {
class_space_arena()->verify();
}
}
@ -167,16 +167,16 @@ void ClassLoaderMetaspace::verify() const {
void ClassLoaderMetaspace::calculate_jfr_stats(size_t* p_used_bytes, size_t* p_capacity_bytes) const {
// Implement this using the standard statistics objects.
size_t used_c = 0, cap_c = 0, used_nc = 0, cap_nc = 0;
if (non_class_space_arena() != NULL) {
non_class_space_arena()->usage_numbers(&used_nc, NULL, &cap_nc);
if (non_class_space_arena() != nullptr) {
non_class_space_arena()->usage_numbers(&used_nc, nullptr, &cap_nc);
}
if (class_space_arena() != NULL) {
class_space_arena()->usage_numbers(&used_c, NULL, &cap_c);
if (class_space_arena() != nullptr) {
class_space_arena()->usage_numbers(&used_c, nullptr, &cap_c);
}
if (p_used_bytes != NULL) {
if (p_used_bytes != nullptr) {
*p_used_bytes = used_c + used_nc;
}
if (p_capacity_bytes != NULL) {
if (p_capacity_bytes != nullptr) {
*p_capacity_bytes = cap_c + cap_nc;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ class ClassLoaderMetaspace : public CHeapObj<mtClass> {
metaspace::MetaspaceArena* _non_class_space_arena;
// Arena for allocations from class space
// (NULL if -XX:-UseCompressedClassPointers).
// (null if -XX:-UseCompressedClassPointers).
metaspace::MetaspaceArena* _class_space_arena;
Mutex* lock() const { return _lock; }
@ -87,7 +87,7 @@ public:
MetaWord* allocate(size_t word_size, Metaspace::MetadataType mdType);
// Attempt to expand the GC threshold to be good for at least another word_size words
// and allocate. Returns NULL if failure. Used during Metaspace GC.
// and allocate. Returns null if failure. Used during Metaspace GC.
MetaWord* expand_and_allocate(size_t word_size, Metaspace::MetadataType mdType);
// Prematurely returns a metaspace allocation to the _block_freelists

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,17 +30,17 @@
void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) {
size_t total_sz = GuardedMemory::get_total_size(len);
void* outerp = os::malloc(total_sz, mtInternal);
if (outerp != NULL) {
if (outerp != nullptr) {
GuardedMemory guarded(outerp, len, tag);
void* innerp = guarded.get_user_ptr();
memcpy(innerp, ptr, len);
return innerp;
}
return NULL; // OOM
return nullptr; // OOM
}
bool GuardedMemory::free_copy(void* p) {
if (p == NULL) {
if (p == nullptr) {
return true;
}
GuardedMemory guarded((u_char*)p);
@ -53,7 +53,7 @@ bool GuardedMemory::free_copy(void* p) {
}
void GuardedMemory::print_on(outputStream* st) const {
if (_base_addr == NULL) {
if (_base_addr == nullptr) {
st->print_cr("GuardedMemory(" PTR_FORMAT ") not associated to any memory", p2i(this));
return;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -163,7 +163,7 @@ protected:
* @param user_size the size of the user data to be wrapped.
* @param tag optional general purpose tag.
*/
GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = nullptr) {
wrap_with_guards(base_ptr, user_size, tag);
}
@ -192,8 +192,8 @@ protected:
*
* @return user data pointer (inner pointer to supplied "base_ptr").
*/
void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = nullptr) {
assert(base_ptr != nullptr, "Attempt to wrap null with memory guard");
_base_addr = (u_char*)base_ptr;
get_head_guard()->build();
get_head_guard()->set_user_size(user_size);
@ -210,7 +210,7 @@ protected:
* @return true if guards are intact, false would indicate a buffer overrun.
*/
bool verify_guards() const {
if (_base_addr != NULL) {
if (_base_addr != nullptr) {
return (get_head_guard()->verify() && get_tail_guard()->verify());
}
return false;
@ -226,7 +226,7 @@ protected:
/**
* Return the general purpose tag.
*
* @return the general purpose tag, defaults to NULL.
* @return the general purpose tag, defaults to null.
*/
void* get_tag() const { return get_head_guard()->get_tag(); }
@ -236,7 +236,7 @@ protected:
* @return the size of the user data.
*/
size_t get_user_size() const {
assert(_base_addr != NULL, "Not wrapping any memory");
assert(_base_addr != nullptr, "Not wrapping any memory");
return get_head_guard()->get_user_size();
}
@ -246,7 +246,7 @@ protected:
* @return the user data pointer.
*/
u_char* get_user_ptr() const {
assert(_base_addr != NULL, "Not wrapping any memory");
assert(_base_addr != nullptr, "Not wrapping any memory");
return _base_addr + sizeof(GuardHeader);
}
@ -269,7 +269,7 @@ protected:
*/
void* release() {
void* p = (void*) _base_addr;
_base_addr = NULL;
_base_addr = nullptr;
return p;
}
@ -303,9 +303,9 @@ protected:
* @param len the length of the copy
* @param tag optional general purpose tag (see GuardedMemory::get_tag())
*
* @return guarded wrapped memory pointer to the user area, or NULL if OOM.
* @return guarded wrapped memory pointer to the user area, or null if OOM.
*/
static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
static void* wrap_copy(const void* p, const size_t len, const void* tag = nullptr);
/**
* Free wrapped copy.

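The GuardedMemory hunks above cover the copy-wrapping helpers: wrap_copy() returns an inner user pointer guarded on both sides, or nullptr when the C-heap allocation fails, and free_copy() accepts a null argument. A hypothetical round trip (the payload is invented for illustration):

    // Hypothetical guarded copy of a small buffer.
    const char msg[] = "hello";
    void* copy = GuardedMemory::wrap_copy(msg, sizeof(msg));  // nullptr on OOM
    if (copy != nullptr) {
      // ... hand 'copy' to code suspected of overrunning buffers ...
      GuardedMemory::free_copy(copy);   // verifies the guards, then frees
    }
    GuardedMemory::free_copy(nullptr);  // allowed; simply returns true
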
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,8 +41,8 @@ CodeHeap::CodeHeap(const char* name, const CodeBlobType code_blob_type)
_segment_size = 0;
_log2_segment_size = 0;
_next_segment = 0;
_freelist = NULL;
_last_insert_point = NULL;
_freelist = nullptr;
_last_insert_point = nullptr;
_freelist_segments = 0;
_freelist_length = 0;
_max_allocated_capacity = 0;
@ -285,7 +285,7 @@ void* CodeHeap::allocate(size_t instance_size) {
HeapBlock* block = search_freelist(number_of_segments);
NOT_PRODUCT(verify());
if (block != NULL) {
if (block != nullptr) {
assert(!block->free(), "must not be marked free");
guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
"The newly allocated block " PTR_FORMAT " is not within the heap "
@ -312,7 +312,7 @@ void* CodeHeap::allocate(size_t instance_size) {
_blob_count++;
return block->allocated_space();
} else {
return NULL;
return nullptr;
}
}
@ -326,7 +326,7 @@ void* CodeHeap::allocate(size_t instance_size) {
// where the split happens. The segment with relative
// number split_at is the first segment of the split-off block.
HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
if (b == NULL) return NULL;
if (b == nullptr) return nullptr;
// After the split, both blocks must have a size of at least CodeCacheMinBlockLength
assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
"split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
@ -445,11 +445,11 @@ void CodeHeap::deallocate(void* p) {
// Find block which contains the passed pointer,
// regardless of the block being used or free.
// NULL is returned if anything invalid is detected.
// null is returned if anything invalid is detected.
void* CodeHeap::find_block_for(void* p) const {
// Check the pointer to be in committed range.
if (!contains(p)) {
return NULL;
return nullptr;
}
address seg_map = (address)_segmap.low();
@ -458,7 +458,7 @@ void* CodeHeap::find_block_for(void* p) const {
// This may happen in special cases. Just ignore.
// Example: PPC ICache stub generation.
if (is_segment_unused(seg_map[seg_idx])) {
return NULL;
return nullptr;
}
// Iterate the segment map chain to find the start of the block.
@ -478,14 +478,14 @@ void* CodeHeap::find_block_for(void* p) const {
// Return a pointer that points past the block header.
void* CodeHeap::find_start(void* p) const {
HeapBlock* h = (HeapBlock*)find_block_for(p);
return ((h == NULL) || h->free()) ? NULL : h->allocated_space();
return ((h == nullptr) || h->free()) ? nullptr : h->allocated_space();
}
// Find block which contains the passed pointer.
// Same as find_start(p), but with additional safety net.
CodeBlob* CodeHeap::find_blob(void* start) const {
CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
return (result != NULL && result->blob_contains((address)start)) ? result : NULL;
return (result != nullptr && result->blob_contains((address)start)) ? result : nullptr;
}
size_t CodeHeap::alignment_unit() const {
@ -501,15 +501,15 @@ size_t CodeHeap::alignment_offset() const {
}
// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// If not, it returns the subsequent block (if available), null otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
// The returned pointer points past the block header.
void* CodeHeap::next_used(HeapBlock* b) const {
if (b != NULL && b->free()) b = next_block(b);
assert(b == NULL || !b->free(), "must be in use or at end of heap");
return (b == NULL) ? NULL : b->allocated_space();
if (b != nullptr && b->free()) b = next_block(b);
assert(b == nullptr || !b->free(), "must be in use or at end of heap");
return (b == nullptr) ? nullptr : b->allocated_space();
}
// Returns the first used HeapBlock
@ -517,24 +517,24 @@ void* CodeHeap::next_used(HeapBlock* b) const {
HeapBlock* CodeHeap::first_block() const {
if (_next_segment > 0)
return block_at(0);
return NULL;
return nullptr;
}
// The returned pointer points to the block header.
HeapBlock* CodeHeap::block_start(void* q) const {
HeapBlock* b = (HeapBlock*)find_start(q);
if (b == NULL) return NULL;
if (b == nullptr) return nullptr;
return b - 1;
}
// Returns the next Heap block.
// The returned pointer points to the block header.
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
if (b == NULL) return NULL;
if (b == nullptr) return nullptr;
size_t i = segment_for(b) + b->length();
if (i < _next_segment)
return block_at(i);
return NULL;
return nullptr;
}
@ -570,7 +570,7 @@ FreeBlock* CodeHeap::following_block(FreeBlock *b) {
// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
assert(a != NULL && b != NULL, "must be real pointers");
assert(a != nullptr && b != nullptr, "must be real pointers");
// Link b into the list after a
b->set_link(a->link());
@ -585,7 +585,7 @@ void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
bool CodeHeap::merge_right(FreeBlock* a) {
assert(a->free(), "must be a free block");
if (following_block(a) == a->link()) {
assert(a->link() != NULL && a->link()->free(), "must be free too");
assert(a->link() != nullptr && a->link()->free(), "must be free too");
// Remember linked (following) block. invalidate should only zap header of this block.
size_t follower = segment_for(a->link());
@ -623,8 +623,8 @@ void CodeHeap::add_to_freelist(HeapBlock* a) {
invalidate(bseg, bseg + b->length(), sizeof(FreeBlock));
// First element in list?
if (_freelist == NULL) {
b->set_link(NULL);
if (_freelist == nullptr) {
b->set_link(nullptr);
_freelist = b;
return;
}
@ -644,40 +644,40 @@ void CodeHeap::add_to_freelist(HeapBlock* a) {
// List is sorted by increasing addresses.
FreeBlock* prev = _freelist;
FreeBlock* cur = _freelist->link();
if ((_freelist_length > freelist_limit) && (_last_insert_point != NULL)) {
if ((_freelist_length > freelist_limit) && (_last_insert_point != nullptr)) {
_last_insert_point = (FreeBlock*)find_block_for(_last_insert_point);
if ((_last_insert_point != NULL) && _last_insert_point->free() && (_last_insert_point < b)) {
if ((_last_insert_point != nullptr) && _last_insert_point->free() && (_last_insert_point < b)) {
prev = _last_insert_point;
cur = prev->link();
}
}
while(cur != NULL && cur < b) {
while(cur != nullptr && cur < b) {
assert(prev < cur, "Freelist must be ordered");
prev = cur;
cur = cur->link();
}
assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
assert((prev < b) && (cur == nullptr || b < cur), "free-list must be ordered");
insert_after(prev, b);
_last_insert_point = prev;
}
/**
* Search freelist for an entry on the list with the best fit.
* @return NULL, if no one was found
* @return null, if no one was found
*/
HeapBlock* CodeHeap::search_freelist(size_t length) {
FreeBlock* found_block = NULL;
FreeBlock* found_prev = NULL;
FreeBlock* found_block = nullptr;
FreeBlock* found_prev = nullptr;
size_t found_length = _next_segment; // max it out to begin with
HeapBlock* res = NULL;
FreeBlock* prev = NULL;
HeapBlock* res = nullptr;
FreeBlock* prev = nullptr;
FreeBlock* cur = _freelist;
length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;
// Search for best-fitting block
while(cur != NULL) {
while(cur != nullptr) {
size_t cur_length = cur->length();
if (cur_length == length) {
// We have a perfect fit
@ -696,9 +696,9 @@ HeapBlock* CodeHeap::search_freelist(size_t length) {
cur = cur->link();
}
if (found_block == NULL) {
if (found_block == nullptr) {
// None found
return NULL;
return nullptr;
}
// Exact (or at least good enough) fit. Remove from list.
@ -706,7 +706,7 @@ HeapBlock* CodeHeap::search_freelist(size_t length) {
if (found_length - length < CodeCacheMinBlockLength) {
_freelist_length--;
length = found_length;
if (found_prev == NULL) {
if (found_prev == nullptr) {
assert(_freelist == found_block, "sanity check");
_freelist = _freelist->link();
} else {
@ -739,7 +739,7 @@ int CodeHeap::defrag_segmap(bool do_defrag) {
int extra_hops_free = 0;
int blocks_used = 0;
int blocks_free = 0;
for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
for(HeapBlock* h = first_block(); h != nullptr; h = next_block(h)) {
size_t beg = segment_for(h);
size_t end = segment_for(h) + h->length();
int extra_hops = segmap_hops(beg, end);
@ -794,7 +794,7 @@ void CodeHeap::verify() {
assert_locked_or_safepoint(CodeCache_lock);
size_t len = 0;
int count = 0;
for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
for(FreeBlock* b = _freelist; b != nullptr; b = b->link()) {
len += b->length();
count++;
// Check if we have merged all free blocks
@ -803,7 +803,7 @@ void CodeHeap::verify() {
// Verify that freelist contains the right amount of free space
assert(len == _freelist_segments, "wrong freelist");
for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
for(HeapBlock* h = first_block(); h != nullptr; h = next_block(h)) {
if (h->free()) count--;
}
// Verify that the freelist contains the same number of blocks
@ -811,7 +811,7 @@ void CodeHeap::verify() {
assert(count == 0, "missing free blocks");
//---< all free block memory must have been invalidated >---
for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
for(FreeBlock* b = _freelist; b != nullptr; b = b->link()) {
for (char* c = (char*)b + sizeof(FreeBlock); c < (char*)b + segments_to_size(b->length()); c++) {
assert(*c == (char)badCodeHeapNewVal, "FreeBlock@" PTR_FORMAT "(" PTR_FORMAT ") not invalidated @byte %d", p2i(b), b->length(), (int)(c - (char*)b));
}
@ -821,7 +821,7 @@ void CodeHeap::verify() {
size_t nseg = 0;
int extra_hops = 0;
count = 0;
for(HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
for(HeapBlock* b = first_block(); b != nullptr; b = next_block(b)) {
size_t seg1 = segment_for(b);
size_t segn = seg1 + b->length();
extra_hops += segmap_hops(seg1, segn);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,7 @@ class FreeBlock: public HeapBlock {
public:
// Initialization
void initialize(size_t length) { HeapBlock::initialize(length); _link= NULL; }
void initialize(size_t length) { HeapBlock::initialize(length); _link= nullptr; }
// Accessors
FreeBlock* link() const { return _link; }
@ -153,7 +153,7 @@ class CodeHeap : public CHeapObj<mtCode> {
bool expand_by(size_t size); // expands committed memory by size
// Memory allocation
void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return null
void deallocate(void* p); // Deallocate memory
// Free the tail of segments allocated by the last call to 'allocate()' which exceed 'used_size'.
// ATTENTION: this is only safe to use if there was no other call to 'allocate()' after
@ -175,7 +175,7 @@ class CodeHeap : public CHeapObj<mtCode> {
return contains((void*)blob);
}
virtual void* find_start(void* p) const; // returns the block containing p or NULL
virtual void* find_start(void* p) const; // returns the block containing p or null
virtual CodeBlob* find_blob(void* start) const;
size_t alignment_unit() const; // alignment of any block
size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit
@ -191,9 +191,9 @@ class CodeHeap : public CHeapObj<mtCode> {
size_t allocated_in_freelist() const { return _freelist_segments * CodeCacheSegmentSize; }
int freelist_length() const { return _freelist_length; } // number of elements in the freelist
// returns the first block or NULL
// returns the first block or null
virtual void* first() const { return next_used(first_block()); }
// returns the next block given a block p or NULL
// returns the next block given a block p or null
virtual void* next(void* p) const { return next_used(next_block(block_start(p))); }
// Statistics

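The heap.hpp hunks above document that first() returns the first used block or null and next(p) the following used block or null, which gives the usual termination condition for walking a CodeHeap. A hypothetical sketch, assuming heap is a CodeHeap* and the appropriate lock is held:

    // Hypothetical walk over every used block in a CodeHeap.
    for (void* p = heap->first(); p != nullptr; p = heap->next(p)) {
      // ... inspect the allocated block starting at p ...
    }
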
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,13 +44,13 @@
// HeapInspection
inline KlassInfoEntry::~KlassInfoEntry() {
if (_subclasses != NULL) {
if (_subclasses != nullptr) {
delete _subclasses;
}
}
inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
if (_subclasses == NULL) {
if (_subclasses == nullptr) {
_subclasses = new (mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
}
_subclasses->append(cie);
@ -80,7 +80,7 @@ int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
const char* KlassInfoEntry::name() const {
const char* name;
if (_klass->name() != NULL) {
if (_klass->name() != nullptr) {
name = _klass->external_name();
} else {
if (_klass == Universe::boolArrayKlassObj()) name = "<boolArrayKlass>"; else
@ -107,8 +107,8 @@ void KlassInfoEntry::print_on(outputStream* st) const {
(uint64_t)_instance_words * HeapWordSize,
name(),
module->name()->as_C_string(),
module->version() != NULL ? "@" : "",
module->version() != NULL ? module->version()->as_C_string() : "");
module->version() != nullptr ? "@" : "",
module->version() != nullptr ? module->version()->as_C_string() : "");
} else {
st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s",
(int64_t)_instance_count,
@ -119,12 +119,12 @@ void KlassInfoEntry::print_on(outputStream* st) const {
KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
// Can happen if k is an archived class that we haven't loaded yet.
if (k->java_mirror_no_keepalive() == NULL) {
return NULL;
if (k->java_mirror_no_keepalive() == nullptr) {
return nullptr;
}
KlassInfoEntry* elt = _list;
while (elt != NULL) {
while (elt != nullptr) {
if (elt->is_equal(k)) {
return elt;
}
@ -132,7 +132,7 @@ KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
}
elt = new (std::nothrow) KlassInfoEntry(k, list());
// We may be out of space to allocate the new entry.
if (elt != NULL) {
if (elt != nullptr) {
set_list(elt);
}
return elt;
@ -140,7 +140,7 @@ KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
KlassInfoEntry* elt = _list;
while (elt != NULL) {
while (elt != nullptr) {
cic->do_cinfo(elt);
elt = elt->next();
}
@ -148,8 +148,8 @@ void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
void KlassInfoBucket::empty() {
KlassInfoEntry* elt = _list;
_list = NULL;
while (elt != NULL) {
_list = nullptr;
while (elt != nullptr) {
KlassInfoEntry* next = elt->next();
delete elt;
elt = next;
@ -174,7 +174,7 @@ KlassInfoTable::KlassInfoTable(bool add_all_classes) {
_buckets =
(KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_buckets != NULL) {
if (_buckets != nullptr) {
for (int index = 0; index < _num_buckets; index++) {
_buckets[index].initialize();
}
@ -186,12 +186,12 @@ KlassInfoTable::KlassInfoTable(bool add_all_classes) {
}
KlassInfoTable::~KlassInfoTable() {
if (_buckets != NULL) {
if (_buckets != nullptr) {
for (int index = 0; index < _num_buckets; index++) {
_buckets[index].empty();
}
FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
_buckets = NULL;
_buckets = nullptr;
}
}
@ -201,12 +201,12 @@ uint KlassInfoTable::hash(const Klass* p) {
KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
uint idx = hash(k) % _num_buckets;
assert(_buckets != NULL, "Allocation failure should have been caught");
assert(_buckets != nullptr, "Allocation failure should have been caught");
KlassInfoEntry* e = _buckets[idx].lookup(k);
// Lookup may fail if this is a new klass for which we
// could not allocate space for an new entry, or if it's
// an archived class that we haven't loaded yet.
assert(e == NULL || k == e->klass(), "must be equal");
assert(e == nullptr || k == e->klass(), "must be equal");
return e;
}
@ -215,9 +215,9 @@ KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
bool KlassInfoTable::record_instance(const oop obj) {
Klass* k = obj->klass();
KlassInfoEntry* elt = lookup(k);
// elt may be NULL if it's a new klass for which we
// elt may be null if it's a new klass for which we
// could not allocate space for a new entry in the hashtable.
if (elt != NULL) {
if (elt != nullptr) {
elt->set_count(elt->count() + 1);
elt->set_words(elt->words() + obj->size());
_size_of_instances_in_words += obj->size();
@ -228,7 +228,7 @@ bool KlassInfoTable::record_instance(const oop obj) {
}
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
assert(_buckets != NULL, "Allocation failure should have been caught");
assert(_buckets != nullptr, "Allocation failure should have been caught");
for (int index = 0; index < _num_buckets; index++) {
_buckets[index].iterate(cic);
}
@ -243,9 +243,9 @@ size_t KlassInfoTable::size_of_instances_in_words() const {
bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
Klass* k = cie->klass();
KlassInfoEntry* elt = lookup(k);
// elt may be NULL if it's a new klass for which we
// elt may be null if it's a new klass for which we
// could not allocate space for a new entry in the hashtable.
if (elt != NULL) {
if (elt != nullptr) {
elt->set_count(elt->count() + cie->count());
elt->set_words(elt->words() + cie->words());
_size_of_instances_in_words += cie->words();
@ -349,9 +349,9 @@ void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfac
cie->set_index(i + 1);
// Add the class to the subclass array of its superclass.
if (super != NULL) {
if (super != nullptr) {
KlassInfoEntry* super_cie = cit.lookup(super);
assert(super_cie != NULL, "could not lookup superclass");
assert(super_cie != nullptr, "could not lookup superclass");
super_cie->add_subclass(cie);
}
}
@ -359,7 +359,7 @@ void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfac
// Set the do_print flag for each class that should be printed.
for(int i = 0; i < elements.length(); i++) {
KlassInfoEntry* cie = elements.at(i);
if (classname == NULL) {
if (classname == nullptr) {
// We are printing all classes.
cie->set_do_print(true);
} else {
@ -374,7 +374,7 @@ void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfac
// maintain the list of classes we still need to process. Start things off
// by priming it with java.lang.Object.
KlassInfoEntry* jlo_cie = cit.lookup(vmClasses::Object_klass());
assert(jlo_cie != NULL, "could not lookup java.lang.Object");
assert(jlo_cie != nullptr, "could not lookup java.lang.Object");
class_stack.push(jlo_cie);
// Repeatedly pop the top item off the stack, print its class info,
@ -384,7 +384,7 @@ void KlassHierarchy::print_class_hierarchy(outputStream* st, bool print_interfac
KlassInfoEntry* curr_cie = class_stack.pop();
if (curr_cie->do_print()) {
print_class(st, curr_cie, print_interfaces);
if (curr_cie->subclasses() != NULL) {
if (curr_cie->subclasses() != nullptr) {
// Current class has subclasses, so push all of them onto the stack.
for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
@ -404,7 +404,7 @@ void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, Klass
bool print_subclasses) {
// Set do_print for all superclasses of this class.
Klass* super = ((InstanceKlass*)cie->klass())->java_super();
while (super != NULL) {
while (super != nullptr) {
KlassInfoEntry* super_cie = cit->lookup(super);
super_cie->set_do_print(true);
super = super->super();
@ -416,7 +416,7 @@ void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, Klass
while (!class_stack.is_empty()) {
KlassInfoEntry* curr_cie = class_stack.pop();
curr_cie->set_do_print(true);
if (print_subclasses && curr_cie->subclasses() != NULL) {
if (print_subclasses && curr_cie->subclasses() != nullptr) {
// Current class has subclasses, so push all of them onto the stack.
for (int i = 0; i < curr_cie->subclasses()->length(); i++) {
KlassInfoEntry* cie = curr_cie->subclasses()->at(i);
@ -440,7 +440,7 @@ static void print_indent(outputStream* st, int indent) {
static void print_classname(outputStream* st, Klass* klass) {
oop loader_oop = klass->class_loader_data()->class_loader();
st->print("%s/", klass->external_name());
if (loader_oop == NULL) {
if (loader_oop == nullptr) {
st->print("null");
} else {
st->print(PTR_FORMAT, p2i(klass->class_loader_data()));
@ -461,7 +461,7 @@ void KlassHierarchy::print_class(outputStream* st, KlassInfoEntry* cie, bool pri
// Print indentation with proper indicators of superclass.
Klass* super = klass->super();
while (super != NULL) {
while (super != nullptr) {
super = super->super();
indent++;
}
@ -530,7 +530,7 @@ class RecordInstanceClosure : public ObjectClosure {
private:
bool should_visit(oop obj) {
return _filter == NULL || _filter->do_object_b(obj);
return _filter == nullptr || _filter->do_object_b(obj);
}
};
@ -571,7 +571,7 @@ uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *fil
ResourceMark rm;
WorkerThreads* workers = Universe::heap()->safepoint_workers();
if (workers != NULL) {
if (workers != nullptr) {
// The GC provided a WorkerThreads to be used during a safepoint.
// Can't run with more threads than provided by the WorkerThreads.
@ -601,7 +601,7 @@ void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num)
KlassInfoTable cit(false);
if (!cit.allocation_failed()) {
// populate table with object allocation info
uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
uintx missed_count = populate_table(&cit, nullptr, parallel_thread_num);
if (missed_count != 0) {
log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
" total instances in data below",

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ class KlassInfoEntry: public CHeapObj<mtInternal> {
public:
KlassInfoEntry(Klass* k, KlassInfoEntry* next) :
_next(next), _klass(k), _instance_count(0), _instance_words(0), _index(-1),
_do_print(false), _subclasses(NULL)
_do_print(false), _subclasses(nullptr)
{}
~KlassInfoEntry();
KlassInfoEntry* next() const { return _next; }
@ -97,7 +97,7 @@ class KlassInfoBucket: public CHeapObj<mtInternal> {
void set_list(KlassInfoEntry* l) { _list = l; }
public:
KlassInfoEntry* lookup(Klass* k);
void initialize() { _list = NULL; }
void initialize() { _list = nullptr; }
void empty();
void iterate(KlassInfoClosure* cic);
};
@ -123,7 +123,7 @@ class KlassInfoTable: public StackObj {
~KlassInfoTable();
bool record_instance(const oop obj);
void iterate(KlassInfoClosure* cic);
bool allocation_failed() { return _buckets == NULL; }
bool allocation_failed() { return _buckets == nullptr; }
size_t size_of_instances_in_words() const;
bool merge(KlassInfoTable* table);
bool merge_entry(const KlassInfoEntry* cie);
@ -201,7 +201,7 @@ class KlassInfoClosure;
class HeapInspection : public StackObj {
public:
void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = nullptr, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,14 +50,14 @@ void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
do_nmethod(nm);
}
}
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL && nm->oops_do_try_claim()) {
if (nm != nullptr && nm->oops_do_try_claim()) {
// Process the oops in the nmethod
nm->oops_do(_cl);
@ -66,7 +66,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nm->mark_as_maybe_on_stack();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
bs_nm->disarm(nm);
}
}
@ -79,7 +79,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
void CodeBlobToNMethodClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
_nm_cl->do_nmethod(nm);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,7 @@ class OopIterateClosure : public OopClosure {
protected:
OopIterateClosure(ReferenceDiscoverer* rd) : _ref_discoverer(rd) { }
OopIterateClosure() : _ref_discoverer(NULL) { }
OopIterateClosure() : _ref_discoverer(nullptr) { }
~OopIterateClosure() { }
void set_ref_discoverer_internal(ReferenceDiscoverer* rd) { _ref_discoverer = rd; }
@ -114,7 +114,7 @@ class OopIterateClosure : public OopClosure {
// An OopIterateClosure that can be used when there's no need to visit the Metadata.
class BasicOopIterateClosure : public OopIterateClosure {
public:
BasicOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) {}
BasicOopIterateClosure(ReferenceDiscoverer* rd = nullptr) : OopIterateClosure(rd) {}
virtual bool do_metadata() { return false; }
virtual void do_klass(Klass* k) { ShouldNotReachHere(); }
@ -176,7 +176,7 @@ class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
const int _claim;
public:
ClaimMetadataVisitingOopIterateClosure(int claim, ReferenceDiscoverer* rd = NULL) :
ClaimMetadataVisitingOopIterateClosure(int claim, ReferenceDiscoverer* rd = nullptr) :
OopIterateClosure(rd),
_claim(claim) { }
@ -192,7 +192,7 @@ class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
// It's used to proxy through the metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public ClaimMetadataVisitingOopIterateClosure {
public:
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL);
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = nullptr);
};
// ObjectClosure is used for iterating through an object space

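BasicOopIterateClosure above defaults its ReferenceDiscoverer to nullptr and answers false for do_metadata(), so a concrete closure only needs the two do_oop() overloads. A hypothetical example (name and behavior invented for illustration):

    // Hypothetical closure that counts the oop slots it is applied to.
    class CountOopsClosure : public BasicOopIterateClosure {
      size_t _count;
    public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      size_t count() const { return _count; }
    };
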
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,7 @@ MemRegion* MemRegion::create_array(size_t length, MEMFLAGS flags) {
}
void MemRegion::destroy_array(MemRegion* array, size_t length) {
if (array == NULL) {
if (array == nullptr) {
return;
}
for (size_t i = 0; i < length; i++) {

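The destroy_array() hunk above makes the teardown path tolerate a null array, mirroring the create_array() factory named in the hunk header. A hypothetical pairing (length and memory flag invented for illustration):

    // Hypothetical create/destroy pairing for a C-heap array of MemRegions.
    MemRegion* regions = MemRegion::create_array(8, mtGC);
    // ... fill and use regions[0..7] ...
    MemRegion::destroy_array(regions, 8);   // destroy_array(nullptr, n) returns early
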
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ private:
size_t _word_size;
public:
MemRegion() : _start(NULL), _word_size(0) {};
MemRegion() : _start(nullptr), _word_size(0) {};
MemRegion(HeapWord* start, size_t word_size) :
_start(start), _word_size(word_size) {};
MemRegion(HeapWord* start, HeapWord* end) :

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,8 @@ class MetadataFactory : AllStatic {
template <typename T>
static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
if (data != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
if (data != nullptr) {
assert(loader_data != nullptr, "shouldn't pass null");
assert(!data->is_shared(), "cannot deallocate array in shared spaces");
int size = data->size();
loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
@ -63,8 +63,8 @@ class MetadataFactory : AllStatic {
// Deallocation method for metadata
template <class T>
static void free_metadata(ClassLoaderData* loader_data, T* md) {
if (md != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
if (md != nullptr) {
assert(loader_data != nullptr, "shouldn't pass null");
int size = md->size();
// Call metadata's deallocate function which will deallocate fields and release_C_heap_structures
assert(!md->on_stack(), "can't deallocate things on stack");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -335,13 +335,13 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size
}
if (new_value > MaxMetaspaceSize) {
if (can_retry != NULL) {
if (can_retry != nullptr) {
*can_retry = false;
}
return false;
}
if (can_retry != NULL) {
if (can_retry != nullptr) {
*can_retry = true;
}
size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);
@ -350,10 +350,10 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size
return false;
}
if (new_cap_until_GC != NULL) {
if (new_cap_until_GC != nullptr) {
*new_cap_until_GC = new_value;
}
if (old_cap_until_GC != NULL) {
if (old_cap_until_GC != nullptr) {
*old_cap_until_GC = old_capacity_until_GC;
}
return true;
@ -534,17 +534,17 @@ void MetaspaceGC::compute_new_size() {
////// Metaspace methods /////
const MetaspaceTracer* Metaspace::_tracer = NULL;
const MetaspaceTracer* Metaspace::_tracer = nullptr;
bool Metaspace::initialized() {
return metaspace::MetaspaceContext::context_nonclass() != NULL
return metaspace::MetaspaceContext::context_nonclass() != nullptr
LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}
#ifdef _LP64
void Metaspace::print_compressed_class_space(outputStream* st) {
if (VirtualSpaceList::vslist_class() != NULL) {
if (VirtualSpaceList::vslist_class() != nullptr) {
MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
MetaWord* top = base + size;
@ -577,7 +577,7 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
// Returns true if class space has been setup (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
return MetaspaceContext::context_class() != NULL;
return MetaspaceContext::context_class() != nullptr;
}
// Reserve a range of memory at an address suitable for en/decoding narrow
@ -612,10 +612,10 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
} search_ranges[] = {
{ (address)(4*G), (address)(32*G), 4*G, },
{ (address)(32*G), (address)(1024*G), (4 << LogKlassAlignmentInBytes) * G },
{ NULL, NULL, 0 }
{ nullptr, nullptr, 0 }
};
for (int i = 0; search_ranges[i].from != NULL; i ++) {
for (int i = 0; search_ranges[i].from != nullptr; i ++) {
address a = search_ranges[i].from;
assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
while (a < search_ranges[i].to) {
@ -636,7 +636,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
return ReservedSpace();
#else
// Default implementation: Just reserve anywhere.
return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)NULL);
return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)nullptr);
#endif // AARCH64
}
@ -760,7 +760,7 @@ void Metaspace::global_initialize() {
// case (b) (No CDS)
ReservedSpace rs;
const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());
address base = NULL;
address base = nullptr;
// If CompressedClassSpaceBaseAddress is set, we attempt to force-map class space to
// the given address. This is a debug-only feature aiding tests. Due to the ASLR lottery
@ -795,7 +795,7 @@ void Metaspace::global_initialize() {
CompressedOops::end() : (address)HeapBaseMinAddress;
base = align_up(base, Metaspace::reserve_alignment());
if (base != NULL) {
if (base != nullptr) {
if (CompressedKlassPointers::is_valid_base(base)) {
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
os::vm_page_size(), (char*)base);
@ -831,7 +831,7 @@ void Metaspace::global_initialize() {
// We must prevent the very first address of the ccs from being used to store
// metadata, since that address would translate to a narrow pointer of 0, and the
// VM does not distinguish between "narrow 0 as in NULL" and "narrow 0 as in start
// VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
// of ccs".
// Before Elastic Metaspace that did not happen due to the fact that every Metachunk
// had a header and therefore could not allocate anything at offset 0.
@ -867,7 +867,7 @@ size_t Metaspace::max_allocation_word_size() {
return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE;
}
// This version of Metaspace::allocate does not throw OOM but simply returns NULL, and
// This version of Metaspace::allocate does not throw OOM but simply returns null, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
@ -875,7 +875,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
assert(word_size <= Metaspace::max_allocation_word_size(),
"allocation size too large (" SIZE_FORMAT ")", word_size);
assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
assert(loader_data != nullptr, "Should never pass around a nullptr loader_data. "
"ClassLoaderData::the_null_class_loader_data() should have been used.");
// Deal with concurrent unloading failed allocation starvation
@ -886,7 +886,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
// Try to allocate metadata.
MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
if (result != NULL) {
if (result != nullptr) {
// Zero initialize.
Copy::fill_to_words((HeapWord*)result, word_size, 0);
@ -901,12 +901,12 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
if (HAS_PENDING_EXCEPTION) {
assert(false, "Should not allocate with exception pending");
return NULL; // caller does a CHECK_NULL too
return nullptr; // caller does a CHECK_NULL too
}
MetaWord* result = allocate(loader_data, word_size, type);
if (result == NULL) {
if (result == nullptr) {
MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
@ -918,10 +918,10 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
}
if (result == NULL) {
if (result == nullptr) {
report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
assert(HAS_PENDING_EXCEPTION, "sanity");
return NULL;
return nullptr;
}
// Zero initialize.
@ -943,7 +943,7 @@ void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_s
is_class_space_allocation(mdtype) ? "class" : "data", word_size);
ResourceMark rm;
if (log.is_debug()) {
if (loader_data->metaspace_or_null() != NULL) {
if (loader_data->metaspace_or_null() != nullptr) {
LogStream ls(log.debug());
loader_data->print_value_on(&ls);
}
@ -991,7 +991,7 @@ const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
case Metaspace::NonClassType: return "Metadata";
default:
assert(false, "Got bad mdtype: %d", (int) mdtype);
return NULL;
return nullptr;
}
}
@ -1002,12 +1002,12 @@ void Metaspace::purge(bool classes_unloaded) {
MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
if (classes_unloaded) {
ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
if (cm != NULL) {
if (cm != nullptr) {
cm->purge();
}
if (using_class_space()) {
cm = ChunkManager::chunkmanager_class();
if (cm != NULL) {
if (cm != nullptr) {
cm->purge();
}
}
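For illustration only, a minimal self-contained sketch of the try / recover / retry shape visible in the TRAPS version of Metaspace::allocate above: a first attempt, a recovery step (in HotSpot, satisfy_failed_metadata_allocation triggers a GC), a single retry, and an out-of-memory report if that also fails. All names below (ToyWord, try_allocate, allocate_with_retry) are hypothetical stand-ins, not HotSpot APIs.

#include <cstddef>
#include <cstdio>

typedef size_t ToyWord;

static ToyWord arena[1024];
static size_t  arena_top   = 0;
static size_t  arena_limit = 8;   // artificially small "commit limit" for the demo

// Bump allocation up to the current limit; returns nullptr on failure.
static ToyWord* try_allocate(size_t words) {
  if (arena_top + words > arena_limit) return nullptr;
  ToyWord* p = &arena[arena_top];
  arena_top += words;
  return p;
}

static ToyWord* allocate_with_retry(size_t words) {
  ToyWord* result = try_allocate(words);
  if (result == nullptr) {
    // Recovery hook: in HotSpot this would be a GC / expansion attempt;
    // here we simply raise the limit and try exactly once more.
    arena_limit = sizeof(arena) / sizeof(arena[0]);
    result = try_allocate(words);
  }
  if (result == nullptr) {
    printf("out of memory: metaspace toy (request: %zu words)\n", words); // stand-in report
  }
  return result;
}

int main() {
  ToyWord* p = allocate_with_retry(16);   // fails once, succeeds after the "recovery" step
  printf("p=%p\n", (void*)p);
  return 0;
}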

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -113,7 +113,7 @@ public:
MetaspaceObj::Type type, TRAPS);
// Non-TRAPS version of allocate which can be called by a non-Java thread, that returns
// NULL on failure.
// null on failure.
static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
MetaspaceObj::Type type);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -121,7 +121,7 @@ private:
int index_for_next_non_empty_list(int index) {
assert(index >= 0 && index < num_lists, "Invalid index %d", index);
int i2 = index;
while (i2 < num_lists && _blocks[i2] == NULL) {
while (i2 < num_lists && _blocks[i2] == nullptr) {
i2 ++;
}
return i2 == num_lists ? -1 : i2;
@ -131,7 +131,7 @@ public:
BinListImpl() {
for (int i = 0; i < num_lists; i++) {
_blocks[i] = NULL;
_blocks[i] = nullptr;
}
}
@ -155,7 +155,7 @@ public:
if (index != -1) {
Block* b = _blocks[index];
const size_t real_word_size = word_size_for_index(index);
assert(b != NULL, "Sanity");
assert(b != nullptr, "Sanity");
assert(b->_word_size >= word_size &&
b->_word_size == real_word_size,
"bad block size in list[%d] (" BLOCK_FORMAT ")", index, BLOCK_FORMAT_ARGS(b));
@ -165,7 +165,7 @@ public:
return (MetaWord*)b;
} else {
*p_real_word_size = 0;
return NULL;
return nullptr;
}
}
@ -183,7 +183,7 @@ public:
for (int i = 0; i < num_lists; i++) {
const size_t s = MinWordSize + i;
int pos = 0;
for (Block* b = _blocks[i]; b != NULL; b = b->_next, pos++) {
for (Block* b = _blocks[i]; b != nullptr; b = b->_next, pos++) {
assert(b->_word_size == s,
"bad block size in list[%d] at pos %d (" BLOCK_FORMAT ")",
i, pos, BLOCK_FORMAT_ARGS(b));
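For illustration only, a minimal self-contained sketch of the size-segregated bin-list idea shown above: an array of intrusive singly-linked lists where list i holds blocks of exactly MinWordSize + i words, and a lookup scans upward for the first non-empty bin. ToyBlock and ToyBinList are hypothetical names, not HotSpot types, and the sketch omits the bookkeeping of the real BinListImpl.

#include <cassert>
#include <cstddef>
#include <cstdio>

struct ToyBlock {            // hypothetical; analogous to BinListImpl::Block
  ToyBlock* next;
  size_t    word_size;
};

template <int num_lists>
struct ToyBinList {
  static const size_t MinWordSize = 2;
  ToyBlock* blocks[num_lists] = {};   // all bins start out empty (nullptr)

  // List i holds blocks of exactly MinWordSize + i words.
  static int index_for_size(size_t word_size) {
    return (int)(word_size - MinWordSize);
  }

  // First non-empty bin at or above 'index', or -1 (mirrors
  // index_for_next_non_empty_list in the hunk above).
  int next_non_empty(int index) const {
    int i = index;
    while (i < num_lists && blocks[i] == nullptr) {
      i++;
    }
    return i == num_lists ? -1 : i;
  }

  void add(ToyBlock* b) {
    int i = index_for_size(b->word_size);
    b->next = blocks[i];
    blocks[i] = b;
  }

  // Remove and return the smallest block of at least word_size words, or nullptr.
  ToyBlock* remove_ge(size_t word_size) {
    int i = next_non_empty(index_for_size(word_size));
    if (i == -1) return nullptr;
    ToyBlock* b = blocks[i];
    blocks[i] = b->next;
    return b;
  }
};

int main() {
  ToyBinList<16> bl;
  ToyBlock b3 = { nullptr, 3 };
  ToyBlock b5 = { nullptr, 5 };
  bl.add(&b3);
  bl.add(&b5);
  ToyBlock* got = bl.remove_ge(4);   // the 3-word block is too small, so we get the 5-word one
  assert(got == &b5);
  printf("got a block of %zu words\n", got->word_size);
  return 0;
}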

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -101,7 +101,7 @@ void BlockTree::verify() const {
// Traverse the tree and test that all nodes are in the correct order.
MemRangeCounter counter;
if (_root != NULL) {
if (_root != nullptr) {
ResourceMark rm;
GrowableArray<walkinfo> stack;
@ -126,9 +126,9 @@ void BlockTree::verify() const {
counter.add(n->_word_size);
if (n == _root) {
tree_assert_invalid_node(n->_parent == NULL, n);
tree_assert_invalid_node(n->_parent == nullptr, n);
} else {
tree_assert_invalid_node(n->_parent != NULL, n);
tree_assert_invalid_node(n->_parent != nullptr, n);
}
// check size and ordering
@ -138,7 +138,7 @@ void BlockTree::verify() const {
tree_assert_invalid_node(n->_word_size < info.lim2, n);
// Check children
if (n->_left != NULL) {
if (n->_left != nullptr) {
tree_assert_invalid_node(n->_left != n, n);
tree_assert_invalid_node(n->_left->_parent == n, n);
@ -150,7 +150,7 @@ void BlockTree::verify() const {
stack.push(info2);
}
if (n->_right != NULL) {
if (n->_right != nullptr) {
tree_assert_invalid_node(n->_right != n, n);
tree_assert_invalid_node(n->_right->_parent == n, n);
@ -164,7 +164,7 @@ void BlockTree::verify() const {
// If node has same-sized siblings check those too.
const Node* n2 = n->_next;
while (n2 != NULL) {
while (n2 != nullptr) {
verify_node_pointer(n2);
tree_assert_invalid_node(n2 != n, n2); // catch simple circles
tree_assert_invalid_node(n2->_word_size == n->_word_size, n2);
@ -192,7 +192,7 @@ void BlockTree::print_tree(outputStream* st) const {
// as a quasi list is much clearer to the eye.
// We print the tree depth-first, with stacked nodes below normal ones
// (normal "real" nodes are marked with a leading '+')
if (_root != NULL) {
if (_root != nullptr) {
ResourceMark rm;
GrowableArray<walkinfo> stack;
@ -216,7 +216,7 @@ void BlockTree::print_tree(outputStream* st) const {
}
// Print same-sized-nodes stacked under this node
for (Node* n2 = n->_next; n2 != NULL; n2 = n2->_next) {
for (Node* n2 = n->_next; n2 != nullptr; n2 = n2->_next) {
st->print_raw(" ");
if (os::is_readable_pointer(n2)) {
st->print_cr(NODE_FORMAT, NODE_FORMAT_ARGS(n2));
@ -227,13 +227,13 @@ void BlockTree::print_tree(outputStream* st) const {
}
// Handle children.
if (n->_right != NULL) {
if (n->_right != nullptr) {
walkinfo info2;
info2.n = n->_right;
info2.depth = info.depth + 1;
stack.push(info2);
}
if (n->_left != NULL) {
if (n->_left != nullptr) {
walkinfo info2;
info2.n = n->_left;
info2.depth = info.depth + 1;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -103,10 +103,10 @@ class BlockTree: public CHeapObj<mtMetaspace> {
Node(size_t word_size) :
_canary(_canary_value),
_parent(NULL),
_left(NULL),
_right(NULL),
_next(NULL),
_parent(nullptr),
_left(nullptr),
_right(nullptr),
_next(nullptr),
_word_size(word_size)
{}
@ -144,7 +144,7 @@ private:
assert(head->_word_size == n->_word_size, "sanity");
n->_next = head->_next;
head->_next = n;
DEBUG_ONLY(n->_left = n->_right = n->_parent = NULL;)
DEBUG_ONLY(n->_left = n->_right = n->_parent = nullptr;)
}
// Given a node list starting at head, remove one of the follow up nodes from
@ -152,7 +152,7 @@ private:
// tree.
// List must contain at least one other node.
static Node* remove_from_list(Node* head) {
assert(head->_next != NULL, "sanity");
assert(head->_next != nullptr, "sanity");
Node* n = head->_next;
head->_next = n->_next;
return n;
@ -161,7 +161,7 @@ private:
// Given a node c and a node p, wire up c as left child of p.
static void set_left_child(Node* p, Node* c) {
p->_left = c;
if (c != NULL) {
if (c != nullptr) {
assert(c->_word_size < p->_word_size, "sanity");
c->_parent = p;
}
@ -170,7 +170,7 @@ private:
// Given a node c and a node p, wire up c as right child of p.
static void set_right_child(Node* p, Node* c) {
p->_right = c;
if (c != NULL) {
if (c != nullptr) {
assert(c->_word_size > p->_word_size, "sanity");
c->_parent = p;
}
@ -179,19 +179,19 @@ private:
// Given a node n, return its successor in the tree
// (node with the next-larger size).
static Node* successor(Node* n) {
Node* succ = NULL;
if (n->_right != NULL) {
Node* succ = nullptr;
if (n->_right != nullptr) {
// If there is a right child, search the left-most
// child of that child.
succ = n->_right;
while (succ->_left != NULL) {
while (succ->_left != nullptr) {
succ = succ->_left;
}
} else {
succ = n->_parent;
Node* n2 = n;
// As long as I am the right child of my parent, search upward
while (succ != NULL && n2 == succ->_right) {
while (succ != nullptr && n2 == succ->_right) {
n2 = succ;
succ = succ->_parent;
}
@ -203,7 +203,7 @@ private:
// If the node is the root and has no parent, sets the replacement as the new root.
void replace_node_in_parent(Node* child, Node* replace) {
Node* parent = child->_parent;
if (parent != NULL) {
if (parent != nullptr) {
if (parent->_left == child) { // Child is left child
set_left_child(parent, replace);
} else {
@ -212,8 +212,8 @@ private:
} else {
assert(child == _root, "must be root");
_root = replace;
if (replace != NULL) {
replace->_parent = NULL;
if (replace != nullptr) {
replace->_parent = nullptr;
}
}
return;
@ -221,21 +221,21 @@ private:
// Given a node n and an insertion point, insert n under insertion point.
void insert(Node* insertion_point, Node* n) {
assert(n->_parent == NULL, "Sanity");
assert(n->_parent == nullptr, "Sanity");
for (;;) {
DEBUG_ONLY(check_node(insertion_point);)
if (n->_word_size == insertion_point->_word_size) {
add_to_list(n, insertion_point); // parent stays NULL in this case.
add_to_list(n, insertion_point); // parent stays null in this case.
break;
} else if (n->_word_size > insertion_point->_word_size) {
if (insertion_point->_right == NULL) {
if (insertion_point->_right == nullptr) {
set_right_child(insertion_point, n);
break;
} else {
insertion_point = insertion_point->_right;
}
} else {
if (insertion_point->_left == NULL) {
if (insertion_point->_left == nullptr) {
set_left_child(insertion_point, n);
break;
} else {
@ -248,8 +248,8 @@ private:
// Given a node and a wish size, search this node and all children for
// the node closest (equal or larger sized) to the size s.
Node* find_closest_fit(Node* n, size_t s) {
Node* best_match = NULL;
while (n != NULL) {
Node* best_match = nullptr;
while (n != nullptr) {
DEBUG_ONLY(check_node(n);)
if (n->_word_size >= s) {
best_match = n;
@ -267,23 +267,23 @@ private:
// Given a wish size, search the whole tree for a
// node closest (equal or larger sized) to the size s.
Node* find_closest_fit(size_t s) {
if (_root != NULL) {
if (_root != nullptr) {
return find_closest_fit(_root, s);
}
return NULL;
return nullptr;
}
// Given a node n, remove it from the tree and repair tree.
void remove_node_from_tree(Node* n) {
assert(n->_next == NULL, "do not delete a node which has a non-empty list");
assert(n->_next == nullptr, "do not delete a node which has a non-empty list");
if (n->_left == NULL && n->_right == NULL) {
replace_node_in_parent(n, NULL);
if (n->_left == nullptr && n->_right == nullptr) {
replace_node_in_parent(n, nullptr);
} else if (n->_left == NULL && n->_right != NULL) {
} else if (n->_left == nullptr && n->_right != nullptr) {
replace_node_in_parent(n, n->_right);
} else if (n->_left != NULL && n->_right == NULL) {
} else if (n->_left != nullptr && n->_right == nullptr) {
replace_node_in_parent(n, n->_left);
} else {
@ -292,13 +292,13 @@ private:
// 1) Find direct successor (the next larger node).
Node* succ = successor(n);
// There has to be a successor since n->right was != NULL...
assert(succ != NULL, "must be");
// There has to be a successor since n->right was != null...
assert(succ != nullptr, "must be");
// ... and it should not have a left child since successor
// is supposed to be the next larger node, so it must be the leftmost node
// in the subtree rooted at n->right
assert(succ->_left == NULL, "must be");
assert(succ->_left == nullptr, "must be");
assert(succ->_word_size > n->_word_size, "sanity");
Node* successor_parent = succ->_parent;
@ -343,14 +343,14 @@ private:
public:
BlockTree() : _root(NULL) {}
BlockTree() : _root(nullptr) {}
// Add a memory block to the tree. Its content will be overwritten.
void add_block(MetaWord* p, size_t word_size) {
DEBUG_ONLY(zap_range(p, word_size));
assert(word_size >= MinWordSize, "invalid block size " SIZE_FORMAT, word_size);
Node* n = new(p) Node(word_size);
if (_root == NULL) {
if (_root == nullptr) {
_root = n;
} else {
insert(_root, n);
@ -366,11 +366,11 @@ public:
Node* n = find_closest_fit(word_size);
if (n != NULL) {
if (n != nullptr) {
DEBUG_ONLY(check_node(n);)
assert(n->_word_size >= word_size, "sanity");
if (n->_next != NULL) {
if (n->_next != nullptr) {
// If the node is head of a chain of same sized nodes, we leave it alone
// and instead remove one of the follow up nodes (which is simpler than
// removing the chain head node and then having to graft the follow up
@ -388,7 +388,7 @@ public:
DEBUG_ONLY(zap_range(p, n->_word_size));
return p;
}
return NULL;
return nullptr;
}
// Returns number of blocks in this structure
@ -397,7 +397,7 @@ public:
// Returns total size, in words, of all elements.
size_t total_size() const { return _counter.total_size(); }
bool is_empty() const { return _root == NULL; }
bool is_empty() const { return _root == nullptr; }
DEBUG_ONLY(void print_tree(outputStream* st) const;)
DEBUG_ONLY(void verify() const;)
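For illustration only, a minimal sketch of the equal-or-larger ("closest fit") lookup that the BlockTree code above performs on its size-keyed binary search tree: remember the best candidate found so far, descend left while the current node is large enough, descend right when it is too small. ToyNode and closest_fit are hypothetical names; the real tree additionally chains same-sized nodes and maintains parent links, which are omitted here.

#include <cassert>
#include <cstddef>
#include <cstdio>

struct ToyNode {            // hypothetical; a stripped-down stand-in for BlockTree::Node
  ToyNode* left;
  ToyNode* right;
  size_t   word_size;       // the key: size of the free block this node describes
};

// Returns the node with the smallest word_size >= s, or nullptr if none is large enough.
static ToyNode* closest_fit(ToyNode* n, size_t s) {
  ToyNode* best = nullptr;
  while (n != nullptr) {
    if (n->word_size >= s) {
      best = n;             // good enough; remember it and try to find a tighter fit
      if (n->word_size == s) {
        break;              // perfect fit, stop searching
      }
      n = n->left;          // a smaller node might still satisfy s
    } else {
      n = n->right;         // too small, only larger nodes can help
    }
  }
  return best;
}

int main() {
  // Tree keyed by size:      8
  //                        /   \
  //                       4     16
  ToyNode n4  = { nullptr, nullptr, 4 };
  ToyNode n16 = { nullptr, nullptr, 16 };
  ToyNode n8  = { &n4, &n16, 8 };
  assert(closest_fit(&n8, 5)->word_size == 8);    // 8 is the tightest fit for 5
  assert(closest_fit(&n8, 9)->word_size == 16);
  assert(closest_fit(&n8, 17) == nullptr);        // nothing is large enough
  printf("closest fit for 5 words: %zu\n", closest_fit(&n8, 5)->word_size);
  return 0;
}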

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -32,19 +32,19 @@
namespace metaspace {
// Returns reference to the one global chunk header pool.
ChunkHeaderPool* ChunkHeaderPool::_chunkHeaderPool = NULL;
ChunkHeaderPool* ChunkHeaderPool::_chunkHeaderPool = nullptr;
ChunkHeaderPool::ChunkHeaderPool() :
_num_slabs(),
_first_slab(NULL),
_current_slab(NULL)
_first_slab(nullptr),
_current_slab(nullptr)
{}
// Note: the global chunk header pool is never deleted; so this destructor only
// exists for the sake of tests.
ChunkHeaderPool::~ChunkHeaderPool() {
Slab* s = _first_slab;
while (s != NULL) {
while (s != nullptr) {
Slab* next_slab = s->_next;
os::free(s);
s = next_slab;
@ -53,11 +53,11 @@ ChunkHeaderPool::~ChunkHeaderPool() {
void ChunkHeaderPool::allocate_new_slab() {
Slab* slab = new Slab();
if (_current_slab != NULL) {
if (_current_slab != nullptr) {
_current_slab->_next = slab;
}
_current_slab = slab;
if (_first_slab == NULL) {
if (_first_slab == nullptr) {
_first_slab = slab;
}
_num_slabs.increment();
@ -69,7 +69,7 @@ size_t ChunkHeaderPool::memory_footprint_words() const {
}
void ChunkHeaderPool::initialize() {
assert(_chunkHeaderPool == NULL, "only once");
assert(_chunkHeaderPool == nullptr, "only once");
_chunkHeaderPool = new ChunkHeaderPool();
}
@ -77,7 +77,7 @@ void ChunkHeaderPool::initialize() {
void ChunkHeaderPool::verify() const {
const Slab* s = _first_slab;
int num = 0;
while (s != NULL) {
while (s != nullptr) {
assert(s->_top >= 0 && s->_top <= SlabCapacity,
"invalid slab at " PTR_FORMAT ", top: %d, slab cap: %d",
p2i(s), s->_top, SlabCapacity );
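For illustration only, a minimal sketch of the slab-pool pattern used by ChunkHeaderPool above: returned headers are recycled through a freelist first; otherwise a header is taken from the current slab, and a new slab is allocated on demand when the current one is full. ToyHeaderPool and ToyHeader are hypothetical names; the real pool hands out Metachunk headers and marks recycled ones as dead.

#include <cstdio>

struct ToyHeader {                    // hypothetical; stands in for a Metachunk header
  ToyHeader* next_free = nullptr;     // freelist link, only meaningful while recycled
  int        payload   = 0;
};

class ToyHeaderPool {
  static const int SlabCapacity = 128;
  struct Slab {
    Slab*     next = nullptr;
    int       top  = 0;               // number of headers handed out from this slab
    ToyHeader elems[SlabCapacity];
  };
  Slab*      _first_slab   = nullptr;
  Slab*      _current_slab = nullptr;
  ToyHeader* _freelist     = nullptr;

  void allocate_new_slab() {          // grow the pool by one slab, keep slabs chained
    Slab* slab = new Slab();
    if (_current_slab != nullptr) {
      _current_slab->next = slab;
    }
    _current_slab = slab;
    if (_first_slab == nullptr) {
      _first_slab = slab;
    }
  }

public:
  ToyHeader* allocate_header() {
    // 1) Reuse a returned header if we have one.
    if (_freelist != nullptr) {
      ToyHeader* h = _freelist;
      _freelist = h->next_free;
      return h;
    }
    // 2) Otherwise bump-allocate from the current slab, growing the pool on demand.
    if (_current_slab == nullptr || _current_slab->top == SlabCapacity) {
      allocate_new_slab();
    }
    return &_current_slab->elems[_current_slab->top++];
  }

  void return_header(ToyHeader* h) {
    h->next_free = _freelist;         // recycled headers are threaded onto the freelist
    _freelist = h;
  }

  ~ToyHeaderPool() {                  // slabs are only released when the pool itself dies
    for (Slab* s = _first_slab; s != nullptr; ) {
      Slab* next = s->next;
      delete s;
      s = next;
    }
  }
};

int main() {
  ToyHeaderPool pool;
  ToyHeader* a = pool.allocate_header();
  pool.return_header(a);
  ToyHeader* b = pool.allocate_header();   // recycled: same storage as 'a'
  printf("recycled: %s\n", a == b ? "yes" : "no");
  return 0;
}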

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -48,7 +48,7 @@ class ChunkHeaderPool : public CHeapObj<mtMetaspace> {
Slab* _next;
int _top;
Metachunk _elems [SlabCapacity];
Slab() : _next(NULL), _top(0) {
Slab() : _next(nullptr), _top(0) {
for (int i = 0; i < SlabCapacity; i++) {
_elems[i].clear();
}
@ -77,11 +77,11 @@ public:
Metachunk* allocate_chunk_header() {
DEBUG_ONLY(verify());
Metachunk* c = NULL;
Metachunk* c = nullptr;
c = _freelist.remove_first();
assert(c == NULL || c->is_dead(), "Not a freelist chunk header?");
if (c == NULL) {
if (_current_slab == NULL ||
assert(c == nullptr || c->is_dead(), "Not a freelist chunk header?");
if (c == nullptr) {
if (_current_slab == nullptr ||
_current_slab->_top == SlabCapacity) {
allocate_new_slab();
assert(_current_slab->_top < SlabCapacity, "Sanity");
@ -100,12 +100,12 @@ public:
void return_chunk_header(Metachunk* c) {
// We should only ever return free chunks, since returning chunks
// happens only on merging and merging only works with free chunks.
assert(c != NULL && c->is_free(), "Sanity");
assert(c != nullptr && c->is_free(), "Sanity");
#ifdef ASSERT
// In debug, fill dead header with pattern.
c->zap_header(0xCC);
c->set_next(NULL);
c->set_prev(NULL);
c->set_next(nullptr);
c->set_prev(nullptr);
#endif
c->set_dead();
_freelist.add(c);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -79,7 +79,7 @@ void ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chunklevel_t targ
assert_lock_strong(Metaspace_lock);
assert(c->is_free(), "chunk to be split must be free.");
assert(c->level() < target_level, "Target level must be higher than current level.");
assert(c->prev() == NULL && c->next() == NULL, "Chunk must be outside of any list.");
assert(c->prev() == nullptr && c->next() == nullptr, "Chunk must be outside of any list.");
DEBUG_ONLY(chunklevel::check_valid_level(target_level);)
DEBUG_ONLY(c->verify();)
@ -127,7 +127,7 @@ Metachunk* ChunkManager::get_chunk(chunklevel_t preferred_level, chunklevel_t ma
// On success, returns a chunk of level <preferred_level>, but at most <max_level>.
// The first <min_committed_words> of the chunk are guaranteed to be committed.
// On error, will return NULL.
// On error, will return null.
//
// This function may fail for two reasons:
// - Either we are unable to reserve space for a new chunk (if the underlying VirtualSpaceList
@ -151,54 +151,54 @@ Metachunk* ChunkManager::get_chunk_locked(chunklevel_t preferred_level, chunklev
// But for now, only consider chunks larger than a certain threshold -
// this is to prevent large loaders (eg boot) from unnecessarily gobbling up
// all the tiny splinter chunks lambdas leave around.
Metachunk* c = NULL;
Metachunk* c = nullptr;
c = _chunks.search_chunk_ascending(preferred_level, MIN2((chunklevel_t)(preferred_level + 2), max_level), min_committed_words);
// 2) Search larger committed chunks:
// If that did not yield anything, look at larger chunks, which may be committed. We would have to split
// them first, of course.
if (c == NULL) {
if (c == nullptr) {
c = _chunks.search_chunk_descending(preferred_level, min_committed_words);
}
// 3) Search best or smaller committed chunks (second attempt):
// Repeat (1) but now consider even the tiniest chunks as long as they are large enough to hold the
// committed min size.
if (c == NULL) {
if (c == nullptr) {
c = _chunks.search_chunk_ascending(preferred_level, max_level, min_committed_words);
}
// if we did not get anything yet, there are no free chunks committed enough. Repeat search but look for uncommitted chunks too:
// 4) Search best or smaller chunks, can be uncommitted:
if (c == NULL) {
if (c == nullptr) {
c = _chunks.search_chunk_ascending(preferred_level, max_level, 0);
}
// 5) Search a larger uncommitted chunk:
if (c == NULL) {
if (c == nullptr) {
c = _chunks.search_chunk_descending(preferred_level, 0);
}
if (c != NULL) {
if (c != nullptr) {
UL(trace, "taken from freelist.");
}
// Failing all that, allocate a new root chunk from the connected virtual space.
// This may fail if the underlying vslist cannot be expanded (e.g. compressed class space)
if (c == NULL) {
if (c == nullptr) {
c = _vslist->allocate_root_chunk();
if (c == NULL) {
if (c == nullptr) {
UL(info, "failed to get new root chunk.");
} else {
assert(c->level() == chunklevel::ROOT_CHUNK_LEVEL, "root chunk expected");
UL(debug, "allocated new root chunk.");
}
}
if (c == NULL) {
if (c == nullptr) {
// If we end up here, we found no match in the freelists and were unable to get a new
// root chunk (so we used up all address space, e.g. out of CompressedClassSpace).
UL2(info, "failed to get chunk (preferred level: " CHKLVL_FORMAT
", max level " CHKLVL_FORMAT ".", preferred_level, max_level);
c = NULL;
c = nullptr;
}
if (c != NULL) {
if (c != nullptr) {
// Now we have a chunk.
// It may be larger than what the caller wanted, so we may want to split it. This should
// always work.
@ -209,17 +209,17 @@ Metachunk* ChunkManager::get_chunk_locked(chunklevel_t preferred_level, chunklev
// Attempt to commit the chunk (depending on settings, we either fully commit it or just
// commit enough to get the caller going). That may fail if we hit a commit limit. In
// that case put the chunk back to the freelist (re-merging it with its neighbors if we
// did split it) and return NULL.
// did split it) and return null.
const size_t to_commit = Settings::new_chunks_are_fully_committed() ? c->word_size() : min_committed_words;
if (c->committed_words() < to_commit) {
if (c->ensure_committed_locked(to_commit) == false) {
UL2(info, "failed to commit " SIZE_FORMAT " words on chunk " METACHUNK_FORMAT ".",
to_commit, METACHUNK_FORMAT_ARGS(c));
return_chunk_locked(c);
c = NULL;
c = nullptr;
}
}
if (c != NULL) {
if (c != nullptr) {
// Still here? We have now a good chunk, all is well.
assert(c->committed_words() >= min_committed_words, "Sanity");
@ -265,13 +265,13 @@ void ChunkManager::return_chunk_locked(Metachunk* c) {
c->reset_used_words();
const chunklevel_t orig_lvl = c->level();
Metachunk* merged = NULL;
Metachunk* merged = nullptr;
if (!c->is_root_chunk()) {
// Only attempt merging if we are not of the lowest level already.
merged = c->vsnode()->merge(c, &_chunks);
}
if (merged != NULL) {
if (merged != nullptr) {
InternalStats::inc_num_chunk_merges();
DEBUG_ONLY(merged->verify());
// We did merge chunks and now have a bigger chunk.
@ -351,7 +351,7 @@ void ChunkManager::purge() {
l++) {
// Since we uncommit all chunks at this level, we do not break the "committed chunks are
// at the front of the list" condition.
for (Metachunk* c = _chunks.first_at_level(l); c != NULL; c = c->next()) {
for (Metachunk* c = _chunks.first_at_level(l); c != nullptr; c = c->next()) {
c->uncommit_locked();
}
}
@ -383,11 +383,11 @@ void ChunkManager::purge() {
// Convenience methods to return the global class-space chunkmanager
// and non-class chunkmanager, respectively.
ChunkManager* ChunkManager::chunkmanager_class() {
return MetaspaceContext::context_class() == NULL ? NULL : MetaspaceContext::context_class()->cm();
return MetaspaceContext::context_class() == nullptr ? nullptr : MetaspaceContext::context_class()->cm();
}
ChunkManager* ChunkManager::chunkmanager_nonclass() {
return MetaspaceContext::context_nonclass() == NULL ? NULL : MetaspaceContext::context_nonclass()->cm();
return MetaspaceContext::context_nonclass() == nullptr ? nullptr : MetaspaceContext::context_nonclass()->cm();
}
// Calculates the total number of committed words over all chunks. Walks chunks.
@ -420,7 +420,7 @@ void ChunkManager::verify() const {
void ChunkManager::verify_locked() const {
assert_lock_strong(Metaspace_lock);
assert(_vslist != NULL, "No vslist");
assert(_vslist != nullptr, "No vslist");
_chunks.verify();
}
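For intuition, the size arithmetic behind chunk levels and split_chunk_and_add_splinters above: each level halves the chunk size, and splitting a free chunk down to a target level keeps one chunk of that level while the unused half of every split becomes a splinter returned to the freelist of its level. The 4 MB root-chunk size used below is an assumption for illustration, not taken from this diff.

#include <cstddef>
#include <cstdio>

const size_t K = 1024;
const size_t root_chunk_byte_size = 4 * K * K;   // assumed root chunk size (level 0)

// Each level halves the size: level 1 is half a root chunk, level 2 a quarter, and so on.
static size_t byte_size_for_level(int level) {
  return root_chunk_byte_size >> level;
}

int main() {
  const int from_level   = 0;   // start with a free root chunk
  const int target_level = 3;   // want a chunk 1/8th of the root size
  printf("kept chunk : level %d, %zu bytes\n",
         target_level, byte_size_for_level(target_level));
  // Every split halves the chunk; the unused half of each split is a splinter that
  // goes back to the freelist of its level (levels from_level+1 .. target_level).
  for (int l = from_level + 1; l <= target_level; l++) {
    printf("splinter   : level %d, %zu bytes\n", l, byte_size_for_level(l));
  }
  return 0;
}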

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -115,7 +115,7 @@ public:
// On success, returns a chunk of level <preferred_level>, but at most <max_level>.
// The first <min_committed_words> of the chunk are guaranteed to be committed.
// On error, will return NULL.
// On error, will return null.
//
// This function may fail for two reasons:
// - Either we are unable to reserve space for a new chunk (if the underlying VirtualSpaceList

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -78,7 +78,7 @@ void CommitMask::verify() const {
// Walk the whole commit mask.
// For each 1 bit, check if the associated granule is accessible.
// For each 0 bit, check if the associated granule is not accessible. Slow mode only.
assert(_base != NULL && _word_size > 0 && _words_per_bit > 0, "Sanity");
assert(_base != nullptr && _word_size > 0 && _words_per_bit > 0, "Sanity");
assert_is_aligned(_base, _words_per_bit * BytesPerWord);
assert_is_aligned(_word_size, _words_per_bit);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -43,13 +43,13 @@ MetaWord* FreeBlocks::remove_block(size_t requested_word_size) {
assert(requested_word_size >= MinWordSize,
"requested_word_size too small (" SIZE_FORMAT ")", requested_word_size);
size_t real_size = 0;
MetaWord* p = NULL;
MetaWord* p = nullptr;
if (requested_word_size > MaxSmallBlocksWordSize) {
p = _tree.remove_block(requested_word_size, &real_size);
} else {
p = _small_blocks.remove_block(requested_word_size, &real_size);
}
if (p != NULL) {
if (p != nullptr) {
// Blocks which are larger than a certain threshold are split and
// the remainder is handed back to the manager.
const size_t waste = real_size - requested_word_size;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -34,7 +34,7 @@ namespace metaspace {
// Calculates total number of committed words over all chunks (walks chunks).
size_t FreeChunkList::calc_committed_word_size() const {
size_t s = 0;
for (const Metachunk* c = _first; c != NULL; c = c->next()) {
for (const Metachunk* c = _first; c != nullptr; c = c->next()) {
s += c->committed_words();
}
return s;
@ -42,7 +42,7 @@ size_t FreeChunkList::calc_committed_word_size() const {
void FreeChunkList::print_on(outputStream* st) const {
if (_num_chunks.get() > 0) {
for (const Metachunk* c = _first; c != NULL; c = c->next()) {
for (const Metachunk* c = _first; c != nullptr; c = c->next()) {
st->print(" - <");
c->print_on(st);
st->print(">");
@ -56,7 +56,7 @@ void FreeChunkList::print_on(outputStream* st) const {
#ifdef ASSERT
bool FreeChunkList::contains(const Metachunk* c) const {
for (Metachunk* c2 = _first; c2 != NULL; c2 = c2->next()) {
for (Metachunk* c2 = _first; c2 != nullptr; c2 = c2->next()) {
if (c2 == c) {
return true;
}
@ -65,17 +65,17 @@ bool FreeChunkList::contains(const Metachunk* c) const {
}
void FreeChunkList::verify() const {
if (_first == NULL) {
assert(_last == NULL, "Sanity");
if (_first == nullptr) {
assert(_last == nullptr, "Sanity");
} else {
assert(_last != NULL, "Sanity");
assert(_last != nullptr, "Sanity");
int num = 0;
for (Metachunk* c = _first; c != NULL; c = c->next()) {
for (Metachunk* c = _first; c != nullptr; c = c->next()) {
assert(c->is_free(), "Chunks in freelist should be free");
assert(c->used_words() == 0, "Chunk in freelist should have no used words.");
assert(c->level() == _first->level(), "wrong level");
assert(c->next() == NULL || c->next()->prev() == c, "front link broken");
assert(c->prev() == NULL || c->prev()->next() == c, "back link broken");
assert(c->next() == nullptr || c->next()->prev() == c, "front link broken");
assert(c->prev() == nullptr || c->prev()->next() == c, "back link broken");
assert(c != c->prev() && c != c->next(), "circle");
c->verify();
num++;
@ -119,34 +119,34 @@ int FreeChunkListVector::num_chunks() const {
// Look for a chunk: starting at level, up to and including max_level,
// return the first chunk whose committed words >= min_committed_words.
// Return NULL if no such chunk was found.
// Return null if no such chunk was found.
Metachunk* FreeChunkListVector::search_chunk_ascending(chunklevel_t level, chunklevel_t max_level, size_t min_committed_words) {
assert(min_committed_words <= chunklevel::word_size_for_level(max_level),
"min chunk size too small to hold min_committed_words");
for (chunklevel_t l = level; l <= max_level; l++) {
FreeChunkList* list = list_for_level(l);
Metachunk* c = list->first_minimally_committed(min_committed_words);
if (c != NULL) {
if (c != nullptr) {
list->remove(c);
return c;
}
}
return NULL;
return nullptr;
}
// Look for a chunk: starting at level, down to (including) the root chunk level,
// return the first chunk whose committed words >= min_committed_words.
// Return NULL if no such chunk was found.
// Return null if no such chunk was found.
Metachunk* FreeChunkListVector::search_chunk_descending(chunklevel_t level, size_t min_committed_words) {
for (chunklevel_t l = level; l >= chunklevel::LOWEST_CHUNK_LEVEL; l --) {
FreeChunkList* list = list_for_level(l);
Metachunk* c = list->first_minimally_committed(min_committed_words);
if (c != NULL) {
if (c != nullptr) {
list->remove(c);
return c;
}
}
return NULL;
return nullptr;
}
void FreeChunkListVector::print_on(outputStream* st) const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -72,15 +72,15 @@ class FreeChunkList {
IntCounter _num_chunks;
void add_front(Metachunk* c) {
if (_first == NULL) {
assert(_last == NULL, "Sanity");
if (_first == nullptr) {
assert(_last == nullptr, "Sanity");
_first = _last = c;
c->set_prev(NULL);
c->set_next(NULL);
c->set_prev(nullptr);
c->set_next(nullptr);
} else {
assert(_last != NULL, "Sanity");
assert(_last != nullptr, "Sanity");
c->set_next(_first);
c->set_prev(NULL);
c->set_prev(nullptr);
_first->set_prev(c);
_first = c;
}
@ -88,14 +88,14 @@ class FreeChunkList {
// Add chunk to the back of the list.
void add_back(Metachunk* c) {
if (_last == NULL) {
assert(_first == NULL, "Sanity");
if (_last == nullptr) {
assert(_first == nullptr, "Sanity");
_last = _first = c;
c->set_prev(NULL);
c->set_next(NULL);
c->set_prev(nullptr);
c->set_next(nullptr);
} else {
assert(_first != NULL, "Sanity");
c->set_next(NULL);
assert(_first != nullptr, "Sanity");
c->set_next(nullptr);
c->set_prev(_last);
_last->set_next(c);
_last = c;
@ -105,8 +105,8 @@ class FreeChunkList {
public:
FreeChunkList() :
_first(NULL),
_last(NULL)
_first(nullptr),
_last(nullptr)
{}
// Remove given chunk from anywhere in the list.
@ -126,15 +126,15 @@ public:
if (_last == c) {
_last = pred;
}
c->set_next(NULL);
c->set_prev(NULL);
c->set_next(nullptr);
c->set_prev(nullptr);
_num_chunks.decrement();
return c;
}
void add(Metachunk* c) {
assert(contains(c) == false, "Chunk already in freelist");
assert(_first == NULL || _first->level() == c->level(),
assert(_first == nullptr || _first->level() == c->level(),
"List should only contain chunks of the same level.");
// Uncommitted chunks go to the back, fully or partially committed to the front.
if (c->committed_words() == 0) {
@ -145,34 +145,34 @@ public:
_num_chunks.increment();
}
// Removes the first chunk from the list and returns it. Returns NULL if list is empty.
// Removes the first chunk from the list and returns it. Returns null if list is empty.
Metachunk* remove_first() {
Metachunk* c = _first;
if (c != NULL) {
if (c != nullptr) {
remove(c);
}
return c;
}
// Returns reference to the first chunk in the list, or NULL
// Returns reference to the first chunk in the list, or null
Metachunk* first() const { return _first; }
// Returns reference to the first chunk in the list with a committed word
// level >= min_committed_words, or NULL.
// level >= min_committed_words, or null.
Metachunk* first_minimally_committed(size_t min_committed_words) const {
// Since uncommitted chunks are added to the back we can stop looking once
// we encounter a fully uncommitted chunk.
Metachunk* c = first();
while (c != NULL &&
while (c != nullptr &&
c->committed_words() < min_committed_words &&
c->committed_words() > 0) {
c = c->next();
}
if (c != NULL &&
if (c != nullptr &&
c->committed_words() >= min_committed_words) {
return c;
}
return NULL;
return nullptr;
}
#ifdef ASSERT
@ -208,7 +208,7 @@ public:
list_for_chunk(c)->remove(c);
}
// Remove first node unless empty. Returns node or NULL.
// Remove first node unless empty. Returns node or null.
Metachunk* remove_first(chunklevel_t lvl) {
Metachunk* c = list_for_level(lvl)->remove_first();
return c;
@ -223,20 +223,20 @@ public:
return list_for_level(lvl)->num_chunks();
}
// Returns reference to first chunk at this level, or NULL if sublist is empty.
// Returns reference to first chunk at this level, or null if sublist is empty.
Metachunk* first_at_level(chunklevel_t lvl) const {
return list_for_level(lvl)->first();
}
// Look for a chunk: starting at level, up to and including max_level,
// return the first chunk whose committed words >= min_committed_words.
// Return NULL if no such chunk was found.
// Return null if no such chunk was found.
Metachunk* search_chunk_ascending(chunklevel_t level, chunklevel_t max_level,
size_t min_committed_words);
// Look for a chunk: starting at level, down to (including) the root chunk level,
// return the first chunk whose committed words >= min_committed_words.
// Return NULL if no such chunk was found.
// Return null if no such chunk was found.
Metachunk* search_chunk_descending(chunklevel_t level, size_t min_committed_words);
// Returns total size in all lists (including uncommitted areas)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -190,14 +190,14 @@ void Metachunk::verify_neighborhood() const {
assert(!is_dead(), "Do not call on dead chunks.");
if (is_root_chunk()) {
// Root chunks are all alone in the world.
assert(next_in_vs() == NULL || prev_in_vs() == NULL, "Root chunks should have no neighbors");
assert(next_in_vs() == nullptr || prev_in_vs() == nullptr, "Root chunks should have no neighbors");
} else {
// Non-root chunks have neighbors, at least one, possibly two.
assert(next_in_vs() != NULL || prev_in_vs() != NULL,
assert(next_in_vs() != nullptr || prev_in_vs() != nullptr,
"A non-root chunk should have neighbors (chunk @" PTR_FORMAT
", base " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
p2i(this), p2i(base()), level());
if (prev_in_vs() != NULL) {
if (prev_in_vs() != nullptr) {
assert(prev_in_vs()->end() == base(),
"Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to predecessor: " METACHUNK_FULL_FORMAT ".",
METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()));
@ -205,7 +205,7 @@ void Metachunk::verify_neighborhood() const {
"Chunk " METACHUNK_FULL_FORMAT ": broken link to left neighbor: " METACHUNK_FULL_FORMAT " (" PTR_FORMAT ").",
METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()), p2i(prev_in_vs()->next_in_vs()));
}
if (next_in_vs() != NULL) {
if (next_in_vs() != nullptr) {
assert(end() == next_in_vs()->base(),
"Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to successor: " METACHUNK_FULL_FORMAT ".",
METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(next_in_vs()));
@ -218,7 +218,7 @@ void Metachunk::verify_neighborhood() const {
// The chunk following us or preceding us may be our buddy or a splintered part of it.
Metachunk* buddy = is_leader() ? next_in_vs() : prev_in_vs();
assert(buddy != NULL, "Missing neighbor.");
assert(buddy != nullptr, "Missing neighbor.");
assert(!buddy->is_dead(), "Invalid buddy state.");
// This neighbor is either our buddy (same level) or a splinter of our buddy - hence
@ -268,7 +268,7 @@ void Metachunk::verify() const {
// Note: only call this on a live Metachunk.
chunklevel::check_valid_level(level());
assert(base() != NULL, "No base ptr");
assert(base() != nullptr, "No base ptr");
assert(committed_words() >= used_words(),
"mismatch: committed: " SIZE_FORMAT ", used: " SIZE_FORMAT ".",
committed_words(), used_words());
@ -277,8 +277,8 @@ void Metachunk::verify() const {
word_size(), committed_words());
// Test base pointer
assert(base() != NULL, "Base pointer NULL");
assert(vsnode() != NULL, "No space");
assert(base() != nullptr, "Base pointer nullptr");
assert(vsnode() != nullptr, "No space");
vsnode()->check_pointer(base());
// Starting address shall be aligned to chunk size.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -167,7 +167,7 @@ class VirtualSpaceNode;
class Metachunk {
// start of chunk memory; NULL if dead.
// start of chunk memory; null if dead.
MetaWord* _base;
// Used words.
@ -224,25 +224,25 @@ class Metachunk {
public:
Metachunk() :
_base(NULL),
_base(nullptr),
_used_words(0),
_committed_words(0),
_level(chunklevel::ROOT_CHUNK_LEVEL),
_state(State::Free),
_vsnode(NULL),
_prev(NULL), _next(NULL),
_prev_in_vs(NULL),
_next_in_vs(NULL)
_vsnode(nullptr),
_prev(nullptr), _next(nullptr),
_prev_in_vs(nullptr),
_next_in_vs(nullptr)
{}
void clear() {
_base = NULL;
_base = nullptr;
_used_words = 0; _committed_words = 0;
_level = chunklevel::ROOT_CHUNK_LEVEL;
_state = State::Free;
_vsnode = NULL;
_prev = NULL; _next = NULL;
_prev_in_vs = NULL; _next_in_vs = NULL;
_vsnode = nullptr;
_prev = nullptr; _next = nullptr;
_prev_in_vs = nullptr; _next_in_vs = nullptr;
}
size_t word_size() const { return chunklevel::word_size_for_level(_level); }
@ -258,7 +258,7 @@ public:
void set_next(Metachunk* c) { _next = c; }
Metachunk* next() const { return _next; }
DEBUG_ONLY(bool in_list() const { return _prev != NULL || _next != NULL; })
DEBUG_ONLY(bool in_list() const { return _prev != nullptr || _next != nullptr; })
// Physical neighbors wiring
void set_prev_in_vs(Metachunk* c) { DEBUG_ONLY(assert_have_expand_lock()); _prev_in_vs = c; }
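For intuition only, the classical buddy-system offset arithmetic behind the leader/buddy notions checked in Metachunk::verify_neighborhood above. This is generic buddy math under assumed sizes; the HotSpot code reaches the buddy through the prev_in_vs()/next_in_vs() links shown in these hunks rather than by offset calculation.

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t chunk_size = 512 * 1024;      // assumed size of both buddies, in bytes
  const size_t offset     = 3 * chunk_size;  // the chunk's offset inside its root chunk

  // A chunk is the "leader" of its buddy pair if it is aligned to twice its own size,
  // i.e. it is the lower half of the pair.
  bool is_leader = (offset % (2 * chunk_size)) == 0;

  // The buddy sits at the offset with the size bit flipped: leaders look forward,
  // followers look backward.
  size_t buddy_offset = offset ^ chunk_size;

  printf("offset %zu: %s, buddy at offset %zu\n",
         offset, is_leader ? "leader" : "follower", buddy_offset);
  assert(!is_leader && buddy_offset == 2 * chunk_size);
  return 0;
}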

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@ void MetachunkList::verify_does_not_contain(const Metachunk* c) const {
}
bool MetachunkList::contains(const Metachunk* c) const {
for (Metachunk* c2 = _first; c2 != NULL; c2 = c2->next()) {
for (Metachunk* c2 = _first; c2 != nullptr; c2 = c2->next()) {
if (c == c2) {
return true;
}
@ -49,8 +49,8 @@ bool MetachunkList::contains(const Metachunk* c) const {
void MetachunkList::verify() const {
int num = 0;
const Metachunk* last_c = NULL;
for (const Metachunk* c = _first; c != NULL; c = c->next()) {
const Metachunk* last_c = nullptr;
for (const Metachunk* c = _first; c != nullptr; c = c->next()) {
num++;
assert(c->prev() != c && c->next() != c, "circularity");
assert(c->prev() == last_c,
@ -65,12 +65,12 @@ void MetachunkList::verify() const {
#endif // ASSERT
size_t MetachunkList::calc_committed_word_size() const {
if (_first != NULL && _first->is_dead()) {
if (_first != nullptr && _first->is_dead()) {
// list used for chunk header pool; dead chunks have no size.
return 0;
}
size_t s = 0;
for (Metachunk* c = _first; c != NULL; c = c->next()) {
for (Metachunk* c = _first; c != nullptr; c = c->next()) {
assert(c->is_dead() == false, "Sanity");
s += c->committed_words();
}
@ -78,12 +78,12 @@ size_t MetachunkList::calc_committed_word_size() const {
}
size_t MetachunkList::calc_word_size() const {
if (_first != NULL && _first->is_dead()) {
if (_first != nullptr && _first->is_dead()) {
// list used for chunk header pool; dead chunks have no size.
return 0;
}
size_t s = 0;
for (Metachunk* c = _first; c != NULL; c = c->next()) {
for (Metachunk* c = _first; c != nullptr; c = c->next()) {
assert(c->is_dead() == false, "Sanity");
s += c->committed_words();
}
@ -92,7 +92,7 @@ size_t MetachunkList::calc_word_size() const {
void MetachunkList::print_on(outputStream* st) const {
if (_num_chunks.get() > 0) {
for (const Metachunk* c = _first; c != NULL; c = c->next()) {
for (const Metachunk* c = _first; c != nullptr; c = c->next()) {
st->print(" - <");
c->print_on(st);
st->print(">");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -53,7 +53,7 @@ class MetachunkList {
public:
MetachunkList() : _first(NULL), _num_chunks() {}
MetachunkList() : _first(nullptr), _num_chunks() {}
int count() const { return _num_chunks.get(); }
@ -72,14 +72,14 @@ public:
Metachunk* c = _first;
_first = _first->next();
if (_first) {
_first->set_prev(NULL);
_first->set_prev(nullptr);
}
_num_chunks.decrement();
c->set_prev(NULL);
c->set_next(NULL);
c->set_prev(nullptr);
c->set_next(nullptr);
return c;
}
return NULL;
return nullptr;
}
Metachunk* first() { return _first; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -65,7 +65,7 @@ void MetaspaceArena::salvage_chunk(Metachunk* c) {
UL2(trace, "salvaging chunk " METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
MetaWord* ptr = c->allocate(remaining_words);
assert(ptr != NULL, "Should have worked");
assert(ptr != nullptr, "Should have worked");
_total_used_words_counter->increment_by(remaining_words);
add_allocation_to_fbl(ptr, remaining_words);
@ -91,8 +91,8 @@ Metachunk* MetaspaceArena::allocate_new_chunk(size_t requested_word_size) {
const chunklevel_t preferred_level = MIN2(max_level, next_chunk_level());
Metachunk* c = _chunk_manager->get_chunk(preferred_level, max_level, requested_word_size);
if (c == NULL) {
return NULL;
if (c == nullptr) {
return nullptr;
}
assert(c->is_in_use(), "Wrong chunk state.");
@ -101,7 +101,7 @@ Metachunk* MetaspaceArena::allocate_new_chunk(size_t requested_word_size) {
}
void MetaspaceArena::add_allocation_to_fbl(MetaWord* p, size_t word_size) {
if (_fbl == NULL) {
if (_fbl == nullptr) {
_fbl = new FreeBlocks(); // Create only on demand
}
_fbl->add_block(p, word_size);
@ -114,11 +114,11 @@ MetaspaceArena::MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPol
_chunk_manager(chunk_manager),
_growth_policy(growth_policy),
_chunks(),
_fbl(NULL),
_fbl(nullptr),
_total_used_words_counter(total_used_words_counter),
_name(name)
#ifdef ASSERT
, _first_fence(NULL)
, _first_fence(nullptr)
#endif
{
UL(debug, ": born.");
@ -139,13 +139,13 @@ MetaspaceArena::~MetaspaceArena() {
MemRangeCounter return_counter;
Metachunk* c = _chunks.first();
Metachunk* c2 = NULL;
Metachunk* c2 = nullptr;
while (c) {
c2 = c->next();
return_counter.add(c->used_words());
DEBUG_ONLY(c->set_prev(NULL);)
DEBUG_ONLY(c->set_next(NULL);)
DEBUG_ONLY(c->set_prev(nullptr);)
DEBUG_ONLY(c->set_next(nullptr);)
UL2(debug, "return chunk: " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
_chunk_manager->return_chunk(c);
// c may be invalid after return_chunk(c) was called. Don't access anymore.
@ -218,18 +218,18 @@ bool MetaspaceArena::attempt_enlarge_current_chunk(size_t requested_word_size) {
// 2) Attempt to allocate from the current chunk.
// 3) Attempt to enlarge the current chunk in place if it is too small.
// 4) Attempt to get a new chunk and allocate from that chunk.
// At any point, if we hit a commit limit, we return NULL.
// At any point, if we hit a commit limit, we return null.
MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
UL2(trace, "requested " SIZE_FORMAT " words.", requested_word_size);
MetaWord* p = NULL;
MetaWord* p = nullptr;
const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
// Before bothering the arena proper, attempt to re-use a block from the free blocks list
if (_fbl != NULL && !_fbl->is_empty()) {
if (_fbl != nullptr && !_fbl->is_empty()) {
p = _fbl->remove_block(raw_word_size);
if (p != NULL) {
if (p != nullptr) {
DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();)
UL2(trace, "taken from fbl (now: %d, " SIZE_FORMAT ").",
_fbl->count(), _fbl->total_size());
@ -245,10 +245,10 @@ MetaWord* MetaspaceArena::allocate(size_t requested_word_size) {
#ifdef ASSERT
// Fence allocation
if (p != NULL && Settings::use_allocation_guard()) {
if (p != nullptr && Settings::use_allocation_guard()) {
STATIC_ASSERT(is_aligned(sizeof(Fence), BytesPerWord));
MetaWord* guard = allocate_inner(sizeof(Fence) / BytesPerWord);
if (guard != NULL) {
if (guard != nullptr) {
// Ignore allocation errors for the fence to keep coding simple. If this
// happens (e.g. because right at this time we hit the Metaspace GC threshold)
// we miss adding this one fence. Not a big deal. Note that this would
@ -269,11 +269,11 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
assert_lock_strong(lock());
const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size);
MetaWord* p = NULL;
MetaWord* p = nullptr;
bool current_chunk_too_small = false;
bool commit_failure = false;
if (current_chunk() != NULL) {
if (current_chunk() != nullptr) {
// Attempt to satisfy the allocation from the current chunk.
@ -301,17 +301,17 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
// Allocate from the current chunk. This should work now.
if (!current_chunk_too_small && !commit_failure) {
p = current_chunk()->allocate(raw_word_size);
assert(p != NULL, "Allocation from chunk failed.");
assert(p != nullptr, "Allocation from chunk failed.");
}
}
if (p == NULL) {
if (p == nullptr) {
// If we are here, we either had no current chunk to begin with or it was deemed insufficient.
assert(current_chunk() == NULL ||
assert(current_chunk() == nullptr ||
current_chunk_too_small || commit_failure, "Sanity");
Metachunk* new_chunk = allocate_new_chunk(raw_word_size);
if (new_chunk != NULL) {
if (new_chunk != nullptr) {
UL2(debug, "allocated new chunk " METACHUNK_FORMAT " for requested word size " SIZE_FORMAT ".",
METACHUNK_FORMAT_ARGS(new_chunk), requested_word_size);
@ -321,7 +321,7 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
}
// We have a new chunk. Before making it the current chunk, retire the old one.
if (current_chunk() != NULL) {
if (current_chunk() != nullptr) {
salvage_chunk(current_chunk());
DEBUG_ONLY(InternalStats::inc_num_chunks_retired();)
}
@ -330,13 +330,13 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
// Now, allocate from that chunk. That should work.
p = current_chunk()->allocate(raw_word_size);
assert(p != NULL, "Allocation from chunk failed.");
assert(p != nullptr, "Allocation from chunk failed.");
} else {
UL2(info, "failed to allocate new chunk for requested word size " SIZE_FORMAT ".", requested_word_size);
}
}
if (p == NULL) {
if (p == nullptr) {
InternalStats::inc_num_allocs_failed_limit();
} else {
DEBUG_ONLY(InternalStats::inc_num_allocs();)
@ -345,8 +345,8 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
SOMETIMES(verify_locked();)
if (p == NULL) {
UL(info, "allocation failed, returned NULL.");
if (p == nullptr) {
UL(info, "allocation failed, returned nullptr.");
} else {
UL2(trace, "after allocation: %u chunk(s), current:" METACHUNK_FULL_FORMAT,
_chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk()));
@ -360,7 +360,7 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
void MetaspaceArena::deallocate_locked(MetaWord* p, size_t word_size) {
assert_lock_strong(lock());
// At this point a current chunk must exist since we only deallocate if we did allocate before.
assert(current_chunk() != NULL, "stray deallocation?");
assert(current_chunk() != nullptr, "stray deallocation?");
assert(is_valid_area(p, word_size),
"Pointer range not part of this Arena and cannot be deallocated: (" PTR_FORMAT ".." PTR_FORMAT ").",
p2i(p), p2i(p + word_size));
@ -385,7 +385,7 @@ void MetaspaceArena::deallocate(MetaWord* p, size_t word_size) {
void MetaspaceArena::add_to_statistics(ArenaStats* out) const {
MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
for (const Metachunk* c = _chunks.first(); c != nullptr; c = c->next()) {
InUseChunkStats& ucs = out->_stats[c->level()];
ucs._num++;
ucs._word_size += c->word_size();
@ -399,7 +399,7 @@ void MetaspaceArena::add_to_statistics(ArenaStats* out) const {
}
}
if (_fbl != NULL) {
if (_fbl != nullptr) {
out->_free_blocks_num += _fbl->count();
out->_free_blocks_word_size += _fbl->total_size();
}
@ -412,18 +412,18 @@ void MetaspaceArena::add_to_statistics(ArenaStats* out) const {
void MetaspaceArena::usage_numbers(size_t* p_used_words, size_t* p_committed_words, size_t* p_capacity_words) const {
MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
size_t used = 0, comm = 0, cap = 0;
for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
for (const Metachunk* c = _chunks.first(); c != nullptr; c = c->next()) {
used += c->used_words();
comm += c->committed_words();
cap += c->word_size();
}
if (p_used_words != NULL) {
if (p_used_words != nullptr) {
*p_used_words = used;
}
if (p_committed_words != NULL) {
if (p_committed_words != nullptr) {
*p_committed_words = comm;
}
if (p_capacity_words != NULL) {
if (p_capacity_words != nullptr) {
*p_capacity_words = cap;
}
}
@ -432,9 +432,9 @@ void MetaspaceArena::usage_numbers(size_t* p_used_words, size_t* p_committed_wor
void MetaspaceArena::verify_locked() const {
assert_lock_strong(lock());
assert(_growth_policy != NULL && _chunk_manager != NULL, "Sanity");
assert(_growth_policy != nullptr && _chunk_manager != nullptr, "Sanity");
_chunks.verify();
if (_fbl != NULL) {
if (_fbl != nullptr) {
_fbl->verify();
}
}
@ -446,7 +446,7 @@ void MetaspaceArena::Fence::verify() const {
void MetaspaceArena::verify_allocation_guards() const {
assert(Settings::use_allocation_guard(), "Don't call with guards disabled.");
for (const Fence* f = _first_fence; f != NULL; f = f->next()) {
for (const Fence* f = _first_fence; f != nullptr; f = f->next()) {
f->verify();
}
}
@ -459,9 +459,9 @@ void MetaspaceArena::verify() const {
// Returns true if the area indicated by pointer and size has actually been allocated
// from this arena.
bool MetaspaceArena::is_valid_area(MetaWord* p, size_t word_size) const {
assert(p != NULL && word_size > 0, "Sanity");
assert(p != nullptr && word_size > 0, "Sanity");
bool found = false;
for (const Metachunk* c = _chunks.first(); c != NULL && !found; c = c->next()) {
for (const Metachunk* c = _chunks.first(); c != nullptr && !found; c = c->next()) {
assert(c->is_valid_committed_pointer(p) ==
c->is_valid_committed_pointer(p + word_size - 1), "range intersects");
found = c->is_valid_committed_pointer(p);
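For illustration only, a minimal sketch of the allocation cascade described in the MetaspaceArena hunks above: try the free-block list, then the current chunk, then a new chunk, and return nullptr when everything fails. ToyArena, ToyChunk and ToyWord are hypothetical names; the enlarge-in-place step, locking, chunk committing and the debug fences are all omitted.

#include <cstddef>
#include <cstdio>

typedef size_t ToyWord;

struct ToyChunk {                   // hypothetical; a bump-allocating chunk
  ToyWord* base;
  size_t   capacity_words;
  size_t   used_words;
  ToyWord* allocate(size_t words) {
    if (used_words + words > capacity_words) return nullptr;
    ToyWord* p = base + used_words;
    used_words += words;
    return p;
  }
};

struct ToyArena {
  ToyChunk* current_chunk = nullptr;

  // In the real code this consults the FreeBlocks structure; here it always misses.
  ToyWord* take_from_free_blocks(size_t) { return nullptr; }

  // In the real code this asks the ChunkManager and may hit a commit limit;
  // here it always fails, to show the nullptr path.
  ToyChunk* get_new_chunk(size_t) { return nullptr; }

  ToyWord* allocate(size_t words) {
    // 1) Recycle a previously deallocated block if one fits.
    ToyWord* p = take_from_free_blocks(words);
    if (p != nullptr) return p;
    // 2) Allocate from the current chunk.
    if (current_chunk != nullptr) {
      p = current_chunk->allocate(words);
      if (p != nullptr) return p;
    }
    // 3) Get a new chunk and retry; if that fails, give up and return nullptr.
    ToyChunk* c = get_new_chunk(words);
    if (c == nullptr) return nullptr;
    current_chunk = c;
    return current_chunk->allocate(words);
  }
};

int main() {
  ToyWord backing[64];
  ToyChunk chunk = { backing, 64, 0 };
  ToyArena arena;
  arena.current_chunk = &chunk;
  ToyWord* p = arena.allocate(10);    // served from the current chunk
  ToyWord* q = arena.allocate(100);   // too large and no new chunk available: nullptr
  printf("p=%p q=%p\n", (void*)p, (void*)q);
  return 0;
}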

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -175,7 +175,7 @@ public:
// 2) Attempt to allocate from the current chunk.
// 3) Attempt to enlarge the current chunk in place if it is too small.
// 4) Attempt to get a new chunk and allocate from that chunk.
// At any point, if we hit a commit limit, we return NULL.
// At any point, if we hit a commit limit, we return null.
MetaWord* allocate(size_t word_size);
// Prematurely returns a metaspace allocation to the _block_freelists because it is not

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -118,7 +118,7 @@ const ArenaGrowthPolicy* ArenaGrowthPolicy::policy_for_space_type(Metaspace::Met
}
}
return NULL;
return nullptr;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -49,7 +49,7 @@ void print_scaled_words_and_percentage(outputStream* st, size_t word_size, size_
}
static const char* display_unit_for_scale(size_t scale) {
const char* s = NULL;
const char* s = nullptr;
switch(scale) {
case 1: s = "bytes"; break;
case BytesPerWord: s = "words"; break;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -34,8 +34,8 @@
namespace metaspace {
MetaspaceContext* MetaspaceContext::_class_space_context = NULL;
MetaspaceContext* MetaspaceContext::_nonclass_space_context = NULL;
MetaspaceContext* MetaspaceContext::_class_space_context = nullptr;
MetaspaceContext* MetaspaceContext::_nonclass_space_context = nullptr;
// Destroys the context: deletes chunkmanager and virtualspacelist.
// If this is a non-expandable context over an existing space, that space remains

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -100,7 +100,7 @@ public:
static MetaspaceContext* context_nonclass() { return _nonclass_space_context; }
// Returns pointer to the global class space context, if compressed class space is active,
// NULL otherwise.
// null otherwise.
static MetaspaceContext* context_class() { return _class_space_context; }
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -60,8 +60,8 @@ MetaspaceDCmd::MetaspaceDCmd(outputStream* output, bool heap) :
int MetaspaceDCmd::num_arguments() {
ResourceMark rm;
MetaspaceDCmd* dcmd = new MetaspaceDCmd(NULL, false);
if (dcmd != NULL) {
MetaspaceDCmd* dcmd = new MetaspaceDCmd(nullptr, false);
if (dcmd != nullptr) {
DCmdMark mark(dcmd);
return dcmd->_dcmdparser.num_arguments();
} else {
@ -73,7 +73,7 @@ void MetaspaceDCmd::execute(DCmdSource source, TRAPS) {
// Parse scale value.
const char* scale_value = _scale.value();
size_t scale = 0;
if (scale_value != NULL) {
if (scale_value != nullptr) {
if (strcasecmp("dynamic", scale_value) == 0) {
scale = 0;
} else {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -54,7 +54,7 @@ public:
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", NULL};
"monitor", nullptr};
return p;
}
static int num_arguments();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -43,7 +43,7 @@
namespace metaspace {
static const char* describe_spacetype(Metaspace::MetaspaceType st) {
const char* s = NULL;
const char* s = nullptr;
switch (st) {
case Metaspace::StandardMetaspaceType: s = "Standard"; break;
case Metaspace::BootMetaspaceType: s = "Boot"; break;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -80,7 +80,7 @@ void PrintCLDMetaspaceInfoClosure::do_cld(ClassLoaderData* cld) {
}
ClassLoaderMetaspace* msp = cld->metaspace_or_null();
if (msp == NULL) {
if (msp == nullptr) {
_num_loaders_without_metaspace++;
return;
}
@ -111,15 +111,15 @@ void PrintCLDMetaspaceInfoClosure::do_cld(ClassLoaderData* cld) {
// Print "CLD for [<loader name>,] instance of <loader class name>"
// or "CLD for <hidden>, loaded by [<loader name>,] instance of <loader class name>"
ResourceMark rm;
const char* name = NULL;
const char* class_name = NULL;
const char* name = nullptr;
const char* class_name = nullptr;
// Note: this should also work if unloading:
Klass* k = cld->class_loader_klass();
if (k != NULL) {
if (k != nullptr) {
class_name = k->external_name();
Symbol* s = cld->name();
if (s != NULL) {
if (s != nullptr) {
name = s->as_C_string();
}
} else {
@ -135,10 +135,10 @@ void PrintCLDMetaspaceInfoClosure::do_cld(ClassLoaderData* cld) {
if (cld->has_class_mirror_holder()) {
_out->print(" <hidden class>, loaded by");
}
if (name != NULL) {
if (name != nullptr) {
_out->print(" \"%s\"", name);
}
if (class_name != NULL) {
if (class_name != nullptr) {
_out->print(" instance of %s", class_name);
}
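
The do_cld() hunks above assemble the "CLD for [<loader name>,] instance of <loader class name>" line by printing each fragment only when its pointer is non-null. A minimal standalone sketch of that optional-fragment pattern; the function name and sample inputs are illustrative and not HotSpot code:

    #include <cstdio>

    // Prints: CLD for ["<name>",] instance of <class_name>
    // Either argument may be null and is then simply omitted.
    static void print_cld_label(const char* name, const char* class_name) {
      std::printf("CLD for");
      if (name != nullptr) {
        std::printf(" \"%s\",", name);
      }
      if (class_name != nullptr) {
        std::printf(" instance of %s", class_name);
      }
      std::printf("\n");
    }

    int main() {
      print_cld_label("app", "jdk.internal.loader.ClassLoaders$AppClassLoader");
      print_cld_label(nullptr, "SomeHiddenLoader");   // loader without a name
    }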

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -41,14 +41,14 @@ namespace metaspace {
RootChunkArea::RootChunkArea(const MetaWord* base) :
_base(base),
_first_chunk(NULL)
_first_chunk(nullptr)
{}
RootChunkArea::~RootChunkArea() {
// This is called when a VirtualSpaceNode is destructed (purged).
// All chunks should be free of course. In fact, there should only
// be one chunk, since all free chunks should have been merged.
if (_first_chunk != NULL) {
if (_first_chunk != nullptr) {
assert(_first_chunk->is_root_chunk() && _first_chunk->is_free(),
"Cannot delete root chunk area if not all chunks are free.");
ChunkHeaderPool::pool()->return_chunk_header(_first_chunk);
@ -123,7 +123,7 @@ void RootChunkArea::split(chunklevel_t target_level, Metachunk* c, FreeChunkList
}
// Insert splinter chunk into vs list
if (c->next_in_vs() != NULL) {
if (c->next_in_vs() != nullptr) {
c->next_in_vs()->set_prev_in_vs(splinter_chunk);
}
splinter_chunk->set_next_in_vs(c->next_in_vs());
@ -146,11 +146,11 @@ void RootChunkArea::split(chunklevel_t target_level, Metachunk* c, FreeChunkList
// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns address of
// the merged chunk; NULL otherwise.
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* RootChunkArea::merge(Metachunk* c, FreeChunkListVector* freelists) {
// Note rules:
@ -199,7 +199,7 @@ Metachunk* RootChunkArea::merge(Metachunk* c, FreeChunkListVector* freelists) {
log_trace(metaspace)("Attempting to merge chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
bool stop = false;
Metachunk* result = NULL;
Metachunk* result = nullptr;
do {
@ -250,7 +250,7 @@ Metachunk* RootChunkArea::merge(Metachunk* c, FreeChunkListVector* freelists) {
// Leader survives, follower chunk is freed. Remove follower from vslist ..
leader->set_next_in_vs(follower->next_in_vs());
if (follower->next_in_vs() != NULL) {
if (follower->next_in_vs() != nullptr) {
follower->next_in_vs()->set_prev_in_vs(leader);
}
@ -276,7 +276,7 @@ Metachunk* RootChunkArea::merge(Metachunk* c, FreeChunkListVector* freelists) {
#ifdef ASSERT
verify();
if (result != NULL) {
if (result != nullptr) {
result->verify();
}
#endif // ASSERT
@ -333,7 +333,7 @@ bool RootChunkArea::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* fre
// Remove buddy from vs list...
Metachunk* successor = buddy->next_in_vs();
if (successor != NULL) {
if (successor != nullptr) {
successor->set_prev_in_vs(c);
}
c->set_next_in_vs(successor);
@ -360,7 +360,7 @@ bool RootChunkArea::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* fre
// In that case, it should only contain one chunk (maximally merged, so a root chunk)
// and it should be free.
bool RootChunkArea::is_free() const {
return _first_chunk == NULL ||
return _first_chunk == nullptr ||
(_first_chunk->is_root_chunk() && _first_chunk->is_free());
}
@ -381,13 +381,13 @@ void RootChunkArea::verify() const {
// being adjacent to each other, and cover the complete area
int num_chunk = 0;
if (_first_chunk != NULL) {
assrt_(_first_chunk->prev_in_vs() == NULL, "Sanity");
if (_first_chunk != nullptr) {
assrt_(_first_chunk->prev_in_vs() == nullptr, "Sanity");
const Metachunk* c = _first_chunk;
const MetaWord* expected_next_base = _base;
while (c != NULL) {
while (c != nullptr) {
assrt_(c->is_free() || c->is_in_use(),
"Chunk No. %d " METACHUNK_FORMAT " - invalid state.",
num_chunk, METACHUNK_FORMAT_ARGS(c));
@ -413,7 +413,7 @@ void RootChunkArea::verify() const {
void RootChunkArea::verify_area_is_ideally_merged() const {
SOMETIMES(assert_lock_strong(Metaspace_lock);)
int num_chunk = 0;
for (const Metachunk* c = _first_chunk; c != NULL; c = c->next_in_vs()) {
for (const Metachunk* c = _first_chunk; c != nullptr; c = c->next_in_vs()) {
if (!c->is_root_chunk() && c->is_free()) {
// If a chunk is free, it must not have a buddy which is also free, because
// those chunks should have been merged.
@ -432,12 +432,12 @@ void RootChunkArea::verify_area_is_ideally_merged() const {
void RootChunkArea::print_on(outputStream* st) const {
st->print(PTR_FORMAT ": ", p2i(base()));
if (_first_chunk != NULL) {
if (_first_chunk != nullptr) {
const Metachunk* c = _first_chunk;
// 01234567890123
const char* letters_for_levels_cap = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
const char* letters_for_levels = "abcdefghijklmnopqrstuvwxyz";
while (c != NULL) {
while (c != nullptr) {
const chunklevel_t l = c->level();
if (l >= 0 && (size_t)l < strlen(letters_for_levels)) {
st->print("%c", c->is_free() ? letters_for_levels[c->level()] : letters_for_levels_cap[c->level()]);
@ -453,12 +453,12 @@ void RootChunkArea::print_on(outputStream* st) const {
st->cr();
}
// Create an array of ChunkTree objects, all initialized to NULL, covering
// Create an array of ChunkTree objects, all initialized to null, covering
// a given memory range. Memory range must be a multiple of root chunk size.
RootChunkAreaLUT::RootChunkAreaLUT(const MetaWord* base, size_t word_size) :
_base(base),
_num((int)(word_size / chunklevel::MAX_CHUNK_WORD_SIZE)),
_arr(NULL)
_arr(nullptr)
{
assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);
_arr = NEW_C_HEAP_ARRAY(RootChunkArea, _num, mtClass);
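
The merge() contract quoted above (return the merged chunk or null; on success the chunk passed in is dead) puts the burden on the caller to stop using the old pointer. A hedged, self-contained sketch of the calling convention only; Chunk and try_merge() are toy stand-ins, not the Metachunk/FreeChunkListVector API:

    #include <cstdio>

    struct Chunk { int level; };            // level 0 == root chunk (largest)

    // Returns the merged (coarser) chunk, or nullptr if no merge was possible.
    // If a non-null value is returned, the chunk passed in must not be used again.
    static Chunk* try_merge(Chunk* c, Chunk* merged_storage) {
      if (c->level == 0) {
        return nullptr;                     // root chunks cannot be merged further
      }
      merged_storage->level = 0;            // pretend we merged all the way up
      return merged_storage;
    }

    int main() {
      Chunk small{3}, storage{0};
      Chunk* c = &small;
      Chunk* merged = try_merge(c, &storage);
      if (merged != nullptr) {
        c = merged;                         // drop the old pointer immediately
      }
      std::printf("chunk level is now %d\n", c->level);
    }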

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -82,11 +82,11 @@ public:
// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns address of
// the merged chunk; NULL otherwise.
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* merge(Metachunk* c, FreeChunkListVector* freelists);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -41,7 +41,7 @@ size_t RunningCounters::reserved_words() {
size_t RunningCounters::reserved_words_class() {
VirtualSpaceList* vs = VirtualSpaceList::vslist_class();
return vs != NULL ? vs->reserved_words() : 0;
return vs != nullptr ? vs->reserved_words() : 0;
}
size_t RunningCounters::reserved_words_nonclass() {
@ -55,7 +55,7 @@ size_t RunningCounters::committed_words() {
size_t RunningCounters::committed_words_class() {
VirtualSpaceList* vs = VirtualSpaceList::vslist_class();
return vs != NULL ? vs->committed_words() : 0;
return vs != nullptr ? vs->committed_words() : 0;
}
size_t RunningCounters::committed_words_nonclass() {
@ -86,7 +86,7 @@ size_t RunningCounters::free_chunks_words() {
size_t RunningCounters::free_chunks_words_class() {
ChunkManager* cm = ChunkManager::chunkmanager_class();
return cm != NULL ? cm->total_word_size() : 0;
return cm != nullptr ? cm->total_word_size() : 0;
}
size_t RunningCounters::free_chunks_words_nonclass() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -62,7 +62,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
_name(name),
_reserve_limit(reserve_limit),
_commit_limit(commit_limit),
_context(NULL),
_context(nullptr),
_commit_limiter(commit_limit == 0 ? max_uintx : commit_limit), // commit_limit == 0 -> no limit
_used_words_counter(),
_rs()
@ -94,7 +94,7 @@ MetaspaceTestContext::~MetaspaceTestContext() {
MetaspaceTestArena* MetaspaceTestContext::create_arena(Metaspace::MetaspaceType type) {
const ArenaGrowthPolicy* growth_policy = ArenaGrowthPolicy::policy_for_space_type(type, false);
Mutex* lock = new Mutex(Monitor::nosafepoint, "MetaspaceTestArea_lock");
MetaspaceArena* arena = NULL;
MetaspaceArena* arena = nullptr;
{
MutexLocker ml(lock, Mutex::_no_safepoint_check_flag);
arena = new MetaspaceArena(_context->cm(), growth_policy, lock, &_used_words_counter, _name);
@ -108,7 +108,7 @@ void MetaspaceTestContext::purge_area() {
#ifdef ASSERT
void MetaspaceTestContext::verify() const {
if (_context != NULL) {
if (_context != nullptr) {
_context->verify();
}
}
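
create_arena() above constructs the new MetaspaceArena inside a block that holds the arena lock through a scoped MutexLocker. A rough sketch of the same RAII-locking shape, with std::mutex/std::lock_guard standing in for HotSpot's Mutex/MutexLocker and an invented Arena type:

    #include <cstdio>
    #include <mutex>

    struct Arena {
      explicit Arena(const char* name) { std::printf("arena %s created\n", name); }
    };

    int main() {
      std::mutex* lock = new std::mutex();
      Arena* arena = nullptr;
      {
        std::lock_guard<std::mutex> ml(*lock);   // lock held only for construction
        arena = new Arena("test-arena");
      }
      // The lock outlives the scope and can keep guarding the arena afterwards.
      delete arena;
      delete lock;
    }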

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -45,7 +45,7 @@ namespace metaspace {
// Create a new, empty, expandable list.
VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limiter) :
_name(name),
_first_node(NULL),
_first_node(nullptr),
_can_expand(true),
_commit_limiter(commit_limiter),
_reserved_words_counter(),
@ -57,7 +57,7 @@ VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limit
// It will not be expandable beyond that first node.
VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter) :
_name(name),
_first_node(NULL),
_first_node(nullptr),
_can_expand(false),
_commit_limiter(commit_limiter),
_reserved_words_counter(),
@ -67,9 +67,9 @@ VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLim
// for this list since we cannot expand.
VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(rs, _commit_limiter,
&_reserved_words_counter, &_committed_words_counter);
assert(vsn != NULL, "node creation failed");
assert(vsn != nullptr, "node creation failed");
_first_node = vsn;
_first_node->set_next(NULL);
_first_node->set_next(nullptr);
_nodes_counter.increment();
}
@ -81,7 +81,7 @@ VirtualSpaceList::~VirtualSpaceList() {
// lists in metaspace are immortal.
VirtualSpaceNode* vsn = _first_node;
VirtualSpaceNode* vsn2 = vsn;
while (vsn != NULL) {
while (vsn != nullptr) {
vsn2 = vsn->next();
delete vsn;
vsn = vsn2;
@ -110,14 +110,14 @@ void VirtualSpaceList::create_new_node() {
Metachunk* VirtualSpaceList::allocate_root_chunk() {
assert_lock_strong(Metaspace_lock);
if (_first_node == NULL ||
if (_first_node == nullptr ||
_first_node->free_words() < chunklevel::MAX_CHUNK_WORD_SIZE) {
#ifdef ASSERT
// Since all allocations from a VirtualSpaceNode happen in
// root-chunk-size units, and the node size must be root-chunk-size aligned,
// we should never have left-over space.
if (_first_node != NULL) {
if (_first_node != nullptr) {
assert(_first_node->free_words() == 0, "Sanity");
}
#endif
@ -127,12 +127,12 @@ Metachunk* VirtualSpaceList::allocate_root_chunk() {
UL2(debug, "added new node (now: %d).", num_nodes());
} else {
UL(debug, "list cannot expand.");
return NULL; // We cannot expand this list.
return nullptr; // We cannot expand this list.
}
}
Metachunk* c = _first_node->allocate_root_chunk();
assert(c != NULL, "This should have worked");
assert(c != nullptr, "This should have worked");
return c;
}
@ -144,7 +144,7 @@ void VirtualSpaceList::print_on(outputStream* st) const {
st->print_cr("vsl %s:", _name);
const VirtualSpaceNode* vsn = _first_node;
int n = 0;
while (vsn != NULL) {
while (vsn != nullptr) {
st->print("- node #%d: ", n);
vsn->print_on(st);
vsn = vsn->next();
@ -157,15 +157,15 @@ void VirtualSpaceList::print_on(outputStream* st) const {
#ifdef ASSERT
void VirtualSpaceList::verify_locked() const {
assert_lock_strong(Metaspace_lock);
assert(_name != NULL, "Sanity");
assert(_name != nullptr, "Sanity");
int n = 0;
if (_first_node != NULL) {
if (_first_node != nullptr) {
size_t total_reserved_words = 0;
size_t total_committed_words = 0;
const VirtualSpaceNode* vsn = _first_node;
while (vsn != NULL) {
while (vsn != nullptr) {
n++;
vsn->verify_locked();
total_reserved_words += vsn->word_size();
@ -191,7 +191,7 @@ void VirtualSpaceList::verify() const {
bool VirtualSpaceList::contains(const MetaWord* p) const {
// Note: needs to work without locks.
const VirtualSpaceNode* vsn = Atomic::load_acquire(&_first_node);
while (vsn != NULL) {
while (vsn != nullptr) {
if (vsn->contains(p)) {
return true;
}
@ -203,11 +203,11 @@ bool VirtualSpaceList::contains(const MetaWord* p) const {
// Convenience methods to return the global class-space chunkmanager
// and non-class chunkmanager, respectively.
VirtualSpaceList* VirtualSpaceList::vslist_class() {
return MetaspaceContext::context_class() == NULL ? NULL : MetaspaceContext::context_class()->vslist();
return MetaspaceContext::context_class() == nullptr ? nullptr : MetaspaceContext::context_class()->vslist();
}
VirtualSpaceList* VirtualSpaceList::vslist_nonclass() {
return MetaspaceContext::context_nonclass() == NULL ? NULL : MetaspaceContext::context_nonclass()->vslist();
return MetaspaceContext::context_nonclass() == nullptr ? nullptr : MetaspaceContext::context_nonclass()->vslist();
}
} // namespace metaspace
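
contains() above deliberately walks the node list without taking Metaspace_lock, which is why the head is read with Atomic::load_acquire. A self-contained sketch of that reader-side pattern using std::atomic; writers are assumed to be serialized by a lock (as they are in HotSpot), and the types and names are illustrative:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct Node {
      const char* base;
      size_t      size;
      Node*       next;
    };

    static std::atomic<Node*> g_head{nullptr};

    // Writer (lock-protected in the real code): fully initialize the node,
    // then publish it with a release store.
    static void push_node(Node* n) {
      n->next = g_head.load(std::memory_order_relaxed);
      g_head.store(n, std::memory_order_release);
    }

    // Reader: acquire-load the head so the published node contents are visible,
    // then walk the list without any lock.
    static bool contains(const char* p) {
      for (Node* n = g_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (p >= n->base && p < n->base + n->size) {
          return true;
        }
      }
      return false;
    }

    int main() {
      static char region[1024];
      static Node n{region, sizeof(region), nullptr};
      push_node(&n);
      std::printf("contains: %d\n", contains(region + 10));
    }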

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -103,7 +103,7 @@ public:
// Allocate a root chunk from this list.
// Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
// Hence, before using this chunk, it must be committed.
// May return NULL if vslist would need to be expanded to hold the new root node but
// May return null if vslist would need to be expanded to hold the new root node but
// the list cannot be expanded (in practice this means we reached CompressedClassSpaceSize).
Metachunk* allocate_root_chunk();
@ -136,8 +136,8 @@ public:
// These exist purely to print limits of the compressed class space;
// if we ever change the ccs to not use a degenerated-list-of-one-node this
// will go away.
MetaWord* base_of_first_node() const { return _first_node != NULL ? _first_node->base() : NULL; }
size_t word_size_of_first_node() const { return _first_node != NULL ? _first_node->word_size() : 0; }
MetaWord* base_of_first_node() const { return _first_node != nullptr ? _first_node->base() : nullptr; }
size_t word_size_of_first_node() const { return _first_node != nullptr ? _first_node->word_size() : 0; }
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -159,7 +159,7 @@ bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
// Returns true if success, false if it did hit a commit limit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
assert_lock_strong(Metaspace_lock);
assert(p != NULL && word_size > 0, "Sanity");
assert(p != nullptr && word_size > 0, "Sanity");
MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
return commit_range(p_start, p_end - p_start);
@ -216,7 +216,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
SizeCounter* reserve_counter, SizeCounter* commit_counter) :
_next(NULL),
_next(nullptr),
_rs(rs),
_owns_rs(owns_rs),
_base((MetaWord*)rs.base()),
@ -292,7 +292,7 @@ VirtualSpaceNode::~VirtualSpaceNode() {
//// Chunk allocation, splitting, merging /////
// Allocate a root chunk from this node. Will fail and return NULL if the node is full
// Allocate a root chunk from this node. Will fail and return null if the node is full
// - if we used up the whole address space of this node's memory region.
// (in case this node backs compressed class space, this is how we hit
// CompressedClassSpaceSize).
@ -318,7 +318,7 @@ Metachunk* VirtualSpaceNode::allocate_root_chunk() {
UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
return c;
}
return NULL; // Node is full.
return nullptr; // Node is full.
}
// Given a chunk c, split it recursively until you get a chunk of the given target_level.
@ -336,14 +336,14 @@ void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkL
// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns address of
// the merged chunk; NULL otherwise.
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {
assert(c != NULL && c->is_free(), "Sanity");
assert(c != nullptr && c->is_free(), "Sanity");
assert_lock_strong(Metaspace_lock);
// Get the rca associated with this chunk and let it handle the merging
@ -363,7 +363,7 @@ Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists)
//
// On success, true is returned, false otherwise.
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {
assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
assert(c != nullptr && c->is_in_use() && !c->is_root_chunk(), "Sanity");
assert_lock_strong(Metaspace_lock);
// Get the rca associated with this chunk and let it handle the merging
@ -411,7 +411,7 @@ volatile int test_access = 0;
// Verify counters and basic structure. Slow mode: verify all chunks in depth
void VirtualSpaceNode::verify_locked() const {
assert_lock_strong(Metaspace_lock);
assert(base() != NULL, "Invalid base");
assert(base() != nullptr, "Invalid base");
assert(base() == (MetaWord*)_rs.base() &&
word_size() == _rs.size() / BytesPerWord,
"Sanity");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -172,7 +172,7 @@ public:
//// Chunk allocation, splitting, merging /////
// Allocate a root chunk from this node. Will fail and return NULL if the node is full
// Allocate a root chunk from this node. Will fail and return null if the node is full
// - if we used up the whole address space of this node's memory region.
// (in case this node backs compressed class space, this is how we hit
// CompressedClassSpaceSize).
@ -189,11 +189,11 @@ public:
// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns address of
// the merged chunk; NULL otherwise.
// the merged chunk; null otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// !!! Please note that if this method returns a non-null value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* merge(Metachunk* c, FreeChunkListVector* freelists);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ void MetaspaceClosure::do_push(MetaspaceClosure::Ref* ref) {
read_only = ref->is_read_only_by_default();
}
if (_nest_level == 0) {
assert(_enclosing_ref == NULL, "must be");
assert(_enclosing_ref == nullptr, "must be");
}
_nest_level ++;
if (do_ref(ref, read_only)) { // true means we want to iterate the embedded pointer in <ref>
@ -76,7 +76,7 @@ void MetaspaceClosure::do_push(MetaspaceClosure::Ref* ref) {
void MetaspaceClosure::finish() {
assert(_nest_level == 0, "must be");
while (_pending_refs != NULL) {
while (_pending_refs != nullptr) {
Ref* ref = _pending_refs;
_pending_refs = _pending_refs->next();
do_push(ref);
@ -87,7 +87,7 @@ void MetaspaceClosure::finish() {
}
MetaspaceClosure::~MetaspaceClosure() {
assert(_pending_refs == NULL,
assert(_pending_refs == nullptr,
"you must explicitly call MetaspaceClosure::finish() to process all refs!");
}
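
The destructor above asserts that finish() was called, because do_push() may defer references onto _pending_refs and only finish() drains them (processing one ref can queue more). A toy sketch of that drain-before-destruction contract, unrelated to the real Ref machinery:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    class Worklist {
      std::vector<int> _pending;
     public:
      void push(int item) { _pending.push_back(item); }

      // Must be called before the object dies; handling an item may discover
      // and push further items, so loop until the list is empty.
      void finish() {
        while (!_pending.empty()) {
          int item = _pending.back();
          _pending.pop_back();
          std::printf("processing %d\n", item);
          if (item > 0) push(item - 1);          // nested discovery
        }
      }

      ~Worklist() {
        assert(_pending.empty() && "call finish() before destruction");
      }
    };

    int main() {
      Worklist w;
      w.push(3);
      w.finish();   // omitting this trips the destructor assert in debug builds
    }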

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -117,7 +117,7 @@ public:
protected:
virtual void** mpp() const = 0;
Ref(Writability w) : _writability(w), _keep_after_pushing(false), _next(NULL), _user_data(NULL) {}
Ref(Writability w) : _writability(w), _keep_after_pushing(false), _next(nullptr), _user_data(nullptr) {}
public:
virtual bool not_null() const = 0;
virtual int size() const = 0;
@ -162,7 +162,7 @@ private:
MSORef(T** mpp, Writability w) : Ref(w), _mpp(mpp) {}
virtual bool is_read_only_by_default() const { return T::is_read_only_by_default(); }
virtual bool not_null() const { return dereference() != NULL; }
virtual bool not_null() const { return dereference() != nullptr; }
virtual int size() const { return dereference()->size(); }
virtual MetaspaceObj::Type msotype() const { return dereference()->type(); }
@ -189,7 +189,7 @@ private:
// all Arrays are read-only by default
virtual bool is_read_only_by_default() const { return true; }
virtual bool not_null() const { return dereference() != NULL; }
virtual bool not_null() const { return dereference() != nullptr; }
virtual int size() const { return dereference()->size(); }
virtual MetaspaceObj::Type msotype() const { return MetaspaceObj::array_type(sizeof(T)); }
};
@ -267,7 +267,7 @@ private:
void do_push(Ref* ref);
public:
MetaspaceClosure(): _pending_refs(NULL), _nest_level(0), _enclosing_ref(NULL) {}
MetaspaceClosure(): _pending_refs(nullptr), _nest_level(0), _enclosing_ref(nullptr) {}
~MetaspaceClosure();
void finish();
@ -283,7 +283,7 @@ public:
//
// Note that if we have stack overflow, do_pending_ref(r) will be called first and
// do_ref(r) will be called later, for the same r. In this case, enclosing_ref() is valid only
// when do_pending_ref(r) is called, and will return NULL when do_ref(r) is called.
// when do_pending_ref(r) is called, and will return null when do_ref(r) is called.
Ref* enclosing_ref() const {
return _enclosing_ref;
}
@ -345,13 +345,13 @@ public:
// Enable this block if you're changing the push(...) methods, to test for types that should be
// disallowed. Each of the following "push" calls should result in a compile-time error.
void test_disallowed_types(MetaspaceClosure* it) {
Hashtable<bool, mtInternal>* h = NULL;
Hashtable<bool, mtInternal>* h = nullptr;
it->push(&h);
Array<Hashtable<bool, mtInternal>*>* a6 = NULL;
Array<Hashtable<bool, mtInternal>*>* a6 = nullptr;
it->push(&a6);
Array<int*>* a7 = NULL;
Array<int*>* a7 = nullptr;
it->push(&a7);
}
#endif

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,10 +49,10 @@ class MetaspacePerfCounters {
}
public:
MetaspacePerfCounters() : _capacity(NULL), _used(NULL), _max_capacity(NULL) {}
MetaspacePerfCounters() : _capacity(nullptr), _used(nullptr), _max_capacity(nullptr) {}
void initialize(const char* ns) {
assert(_capacity == NULL, "Only initialize once");
assert(_capacity == nullptr, "Only initialize once");
EXCEPTION_MARK;
ResourceMark rm;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,8 +47,8 @@ public:
: _loader_data(loader_data),
_word_size(word_size),
_type(type),
_next(NULL),
_result(NULL),
_next(nullptr),
_result(nullptr),
_is_processed(false) {
MetaspaceCriticalAllocation::add(this);
}
@ -72,14 +72,14 @@ public:
};
volatile bool MetaspaceCriticalAllocation::_has_critical_allocation = false;
MetadataAllocationRequest* MetaspaceCriticalAllocation::_requests_head = NULL;
MetadataAllocationRequest* MetaspaceCriticalAllocation::_requests_tail = NULL;
MetadataAllocationRequest* MetaspaceCriticalAllocation::_requests_head = nullptr;
MetadataAllocationRequest* MetaspaceCriticalAllocation::_requests_tail = nullptr;
void MetaspaceCriticalAllocation::add(MetadataAllocationRequest* request) {
MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
log_info(metaspace)("Requesting critical metaspace allocation; almost out of memory");
Atomic::store(&_has_critical_allocation, true);
if (_requests_head == NULL) {
if (_requests_head == nullptr) {
_requests_head = _requests_tail = request;
} else {
_requests_tail->set_next(request);
@ -94,15 +94,15 @@ void MetaspaceCriticalAllocation::unlink(MetadataAllocationRequest* curr, Metada
if (_requests_tail == curr) {
_requests_tail = prev;
}
if (prev != NULL) {
if (prev != nullptr) {
prev->set_next(curr->next());
}
}
void MetaspaceCriticalAllocation::remove(MetadataAllocationRequest* request) {
MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
MetadataAllocationRequest* prev = NULL;
for (MetadataAllocationRequest* curr = _requests_head; curr != NULL; curr = curr->next()) {
MetadataAllocationRequest* prev = nullptr;
for (MetadataAllocationRequest* curr = _requests_head; curr != nullptr; curr = curr->next()) {
if (curr == request) {
unlink(curr, prev);
break;
@ -137,7 +137,7 @@ bool MetaspaceCriticalAllocation::try_allocate_critical(MetadataAllocationReques
{
MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
auto is_first_unprocessed = [&]() {
for (MetadataAllocationRequest* curr = _requests_head; curr != NULL; curr = curr->next()) {
for (MetadataAllocationRequest* curr = _requests_head; curr != nullptr; curr = curr->next()) {
if (!curr->is_processed()) {
// curr is the first not satisfied request
return curr == request;
@ -156,7 +156,7 @@ bool MetaspaceCriticalAllocation::try_allocate_critical(MetadataAllocationReques
// Try to ride on a previous GC and hope for early satisfaction
wait_for_purge(request);
return request->result() != NULL;
return request->result() != nullptr;
}
void MetaspaceCriticalAllocation::wait_for_purge(MetadataAllocationRequest* request) {
@ -183,17 +183,17 @@ void MetaspaceCriticalAllocation::block_if_concurrent_purge() {
void MetaspaceCriticalAllocation::process() {
assert_lock_strong(MetaspaceCritical_lock);
bool all_satisfied = true;
for (MetadataAllocationRequest* curr = _requests_head; curr != NULL; curr = curr->next()) {
if (curr->result() != NULL) {
for (MetadataAllocationRequest* curr = _requests_head; curr != nullptr; curr = curr->next()) {
if (curr->result() != nullptr) {
// Don't satisfy twice (can still be processed twice)
continue;
}
// Try to allocate metadata.
MetaWord* result = curr->loader_data()->metaspace_non_null()->allocate(curr->word_size(), curr->type());
if (result == NULL) {
if (result == nullptr) {
result = curr->loader_data()->metaspace_non_null()->expand_and_allocate(curr->word_size(), curr->type());
}
if (result == NULL) {
if (result == nullptr) {
all_satisfied = false;
}
curr->set_result(result);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -51,7 +51,7 @@ class MetaspaceGCThresholdUpdater : public AllStatic {
return "expand_and_allocate";
default:
assert(false, "Got bad updater: %d", (int) updater);
return NULL;
return nullptr;
};
}
};
@ -74,9 +74,9 @@ class MetaspaceGC : public AllStatic {
static size_t capacity_until_GC();
static bool inc_capacity_until_GC(size_t v,
size_t* new_cap_until_GC = NULL,
size_t* old_cap_until_GC = NULL,
bool* can_retry = NULL);
size_t* new_cap_until_GC = nullptr,
size_t* old_cap_until_GC = nullptr,
bool* can_retry = nullptr);
static size_t dec_capacity_until_GC(size_t v);
// The amount to increase the high-water-mark (_capacity_until_GC)

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,12 +74,12 @@ typeArrayOop oopFactory::new_longArray(int length, TRAPS) {
// create java.lang.Object[]
objArrayOop oopFactory::new_objectArray(int length, TRAPS) {
assert(Universe::objectArrayKlassObj() != NULL, "Too early?");
assert(Universe::objectArrayKlassObj() != nullptr, "Too early?");
return ObjArrayKlass::cast(Universe::objectArrayKlassObj())->allocate(length, THREAD);
}
typeArrayOop oopFactory::new_charArray(const char* utf8_str, TRAPS) {
int length = utf8_str == NULL ? 0 : UTF8::unicode_length(utf8_str);
int length = utf8_str == nullptr ? 0 : UTF8::unicode_length(utf8_str);
typeArrayOop result = new_charArray(length, CHECK_NULL);
if (length > 0) {
UTF8::convert_to_unicode(utf8_str, result->char_at_addr(0), length);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,7 @@ class Padded2DArray {
// The memory cannot be deleted since the raw memory chunk is not returned.
// Always uses mmap to reserve memory. Only the first few pages with the index to
// the rows are touched. Allocation size should be "large" to cover page overhead.
static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = nullptr);
};
// Helper class to create an array of T objects. The array as a whole will

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,7 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
result[i] = (T*)((uintptr_t)data_start + i * row_size);
}
if (allocation_size != NULL) {
if (allocation_size != nullptr) {
*allocation_size = total_size;
}
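
create_unfreeable() above carves one flat allocation into a row-pointer index followed by the padded rows, so the whole 2D array lives in a single reservation that is never freed. A rough standalone sketch of that layout, using malloc in place of the mmap-backed reservation and leaving out the page-alignment details of the real code:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static int** create_2d(unsigned rows, unsigned columns, size_t pad) {
      size_t row_bytes   = columns * sizeof(int);
      size_t row_size    = (row_bytes + pad - 1) & ~(pad - 1);   // pad each row
      size_t index_bytes = rows * sizeof(int*);
      size_t total       = index_bytes + rows * row_size;

      char* block = static_cast<char*>(std::malloc(total));
      std::memset(block, 0, total);

      int** result     = reinterpret_cast<int**>(block);        // row-pointer index
      char* data_start = block + index_bytes;                   // rows follow the index
      for (unsigned i = 0; i < rows; i++) {
        result[i] = reinterpret_cast<int*>(data_start + i * row_size);
      }
      return result;     // like the HotSpot version, the block is never freed
    }

    int main() {
      int** arr = create_2d(4, 10, 64);
      arr[2][5] = 42;
      std::printf("%d\n", arr[2][5]);
    }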

@ -85,9 +85,9 @@
#include "utilities/preserveException.hpp"
// Known objects
Klass* Universe::_typeArrayKlassObjs[T_LONG+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
Klass* Universe::_fillerArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_LONG+1] = { nullptr /*, nullptr...*/ };
Klass* Universe::_objectArrayKlassObj = nullptr;
Klass* Universe::_fillerArrayKlassObj = nullptr;
OopHandle Universe::_basic_type_mirrors[T_VOID+1];
#if INCLUDE_CDS_JAVA_HEAP
int Universe::_archived_basic_type_mirror_indices[T_VOID+1];
@ -128,20 +128,20 @@ OopHandle Universe::_virtual_machine_error_instance;
OopHandle Universe::_reference_pending_list;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache = NULL;
LatestMethodCache* Universe::_throw_illegal_access_error_cache = NULL;
LatestMethodCache* Universe::_throw_no_such_method_error_cache = NULL;
LatestMethodCache* Universe::_do_stack_walk_cache = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = nullptr;
LatestMethodCache* Universe::_finalizer_register_cache = nullptr;
LatestMethodCache* Universe::_loader_addClass_cache = nullptr;
LatestMethodCache* Universe::_throw_illegal_access_error_cache = nullptr;
LatestMethodCache* Universe::_throw_no_such_method_error_cache = nullptr;
LatestMethodCache* Universe::_do_stack_walk_cache = nullptr;
long Universe::verify_flags = Universe::Verify_All;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
Array<Klass*>* Universe::_the_empty_klass_array = NULL;
Array<InstanceKlass*>* Universe::_the_empty_instance_klass_array = NULL;
Array<Method*>* Universe::_the_empty_method_array = NULL;
Array<int>* Universe::_the_empty_int_array = nullptr;
Array<u2>* Universe::_the_empty_short_array = nullptr;
Array<Klass*>* Universe::_the_empty_klass_array = nullptr;
Array<InstanceKlass*>* Universe::_the_empty_instance_klass_array = nullptr;
Array<Method*>* Universe::_the_empty_method_array = nullptr;
// These variables are guarded by FullGCALot_lock.
debug_only(OopHandle Universe::_fullgc_alot_dummy_array;)
@ -159,10 +159,10 @@ bool Universe::_bootstrapping = false;
bool Universe::_module_initialized = false;
bool Universe::_fully_initialized = false;
OopStorage* Universe::_vm_weak = NULL;
OopStorage* Universe::_vm_global = NULL;
OopStorage* Universe::_vm_weak = nullptr;
OopStorage* Universe::_vm_global = nullptr;
CollectedHeap* Universe::_collectedHeap = NULL;
CollectedHeap* Universe::_collectedHeap = nullptr;
objArrayOop Universe::the_empty_class_array () {
return (objArrayOop)_the_empty_class_array.resolve();
@ -248,7 +248,7 @@ void Universe::update_archived_basic_type_mirrors() {
int index = _archived_basic_type_mirror_indices[i];
if (!is_reference_type((BasicType)i) && index >= 0) {
oop mirror_oop = HeapShared::get_root(index);
assert(mirror_oop != NULL, "must be");
assert(mirror_oop != nullptr, "must be");
_basic_type_mirrors[i] = OopHandle(vm_global(), mirror_oop);
}
}
@ -301,14 +301,14 @@ void initialize_basic_type_klass(Klass* k, TRAPS) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
assert(k->super() == ok, "u3");
if (k->is_instance_klass()) {
InstanceKlass::cast(k)->restore_unshareable_info(loader_data, Handle(), NULL, CHECK);
InstanceKlass::cast(k)->restore_unshareable_info(loader_data, Handle(), nullptr, CHECK);
} else {
ArrayKlass::cast(k)->restore_unshareable_info(loader_data, Handle(), CHECK);
}
} else
#endif
{
k->initialize_supers(ok, NULL, CHECK);
k->initialize_supers(ok, nullptr, CHECK);
}
k->append_to_sibling_list();
}
@ -337,7 +337,7 @@ void Universe::genesis(TRAPS) {
ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
_the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
_the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, nullptr, CHECK);
_the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
_the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
_the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
@ -393,7 +393,7 @@ void Universe::genesis(TRAPS) {
}
// Create a handle for reference_pending_list
_reference_pending_list = OopHandle(vm_global(), NULL);
_reference_pending_list = OopHandle(vm_global(), nullptr);
// Maybe this could be lifted up now that object array can be initialized
// during the bootstrapping.
@ -455,18 +455,18 @@ void Universe::initialize_basic_type_mirrors(TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
if (UseSharedSpaces &&
ArchiveHeapLoader::are_archived_mirrors_available() &&
_basic_type_mirrors[T_INT].resolve() != NULL) {
_basic_type_mirrors[T_INT].resolve() != nullptr) {
assert(ArchiveHeapLoader::can_use(), "Sanity");
// check that all basic type mirrors are mapped also
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
if (!is_reference_type((BasicType)i)) {
oop m = _basic_type_mirrors[i].resolve();
assert(m != NULL, "archived mirrors should not be NULL");
assert(m != nullptr, "archived mirrors should not be nullptr");
}
}
} else
// _basic_type_mirrors[T_INT], etc, are NULL if archived heap is not mapped.
// _basic_type_mirrors[T_INT], etc, are null if archived heap is not mapped.
#endif
{
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
@ -504,7 +504,7 @@ void Universe::fixup_mirrors(TRAPS) {
java_lang_Class::fixup_mirror(k, CATCH);
}
delete java_lang_Class::fixup_mirror_list();
java_lang_Class::set_fixup_mirror_list(NULL);
java_lang_Class::set_fixup_mirror_list(nullptr);
}
#define assert_pll_locked(test) \
@ -523,12 +523,12 @@ oop Universe::reference_pending_list() {
void Universe::clear_reference_pending_list() {
assert_pll_ownership();
_reference_pending_list.replace(NULL);
_reference_pending_list.replace(nullptr);
}
bool Universe::has_reference_pending_list() {
assert_pll_ownership();
return _reference_pending_list.peek() != NULL;
return _reference_pending_list.peek() != nullptr;
}
oop Universe::swap_reference_pending_list(oop list) {
@ -647,15 +647,15 @@ oop Universe::gen_out_of_memory_error(oop default_err) {
} else {
JavaThread* current = JavaThread::current();
Handle default_err_h(current, default_err);
// get the error object at the slot and set it to NULL so that the
// get the error object at the slot and set it to null so that the
// array isn't keeping it alive anymore.
Handle exc(current, preallocated_out_of_memory_errors()->obj_at(next));
assert(exc() != NULL, "slot has been used already");
preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
assert(exc() != nullptr, "slot has been used already");
preallocated_out_of_memory_errors()->obj_at_put(next, nullptr);
// use the message from the default error
oop msg = java_lang_Throwable::message(default_err_h());
assert(msg != NULL, "no message");
assert(msg != nullptr, "no message");
java_lang_Throwable::set_message(exc(), msg);
// populate the stack trace and return it.
@ -839,7 +839,7 @@ jint universe_init() {
}
jint Universe::initialize_heap() {
assert(_collectedHeap == NULL, "Heap already created");
assert(_collectedHeap == nullptr, "Heap already created");
_collectedHeap = GCConfig::arguments()->create_heap();
log_info(gc)("Using %s", _collectedHeap->name());
@ -881,7 +881,7 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
"must be exactly of required size and alignment");
// We are good.
if (AllocateHeapAt != NULL) {
if (AllocateHeapAt != nullptr) {
log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
}
@ -927,10 +927,10 @@ void initialize_known_method(LatestMethodCache* method_cache,
bool is_static, TRAPS)
{
TempNewSymbol name = SymbolTable::new_symbol(method);
Method* m = NULL;
Method* m = nullptr;
// The klass must be linked before looking up the method.
if (!ik->link_class_or_fail(THREAD) ||
((m = ik->find_method(name, signature)) == NULL) ||
((m = ik->find_method(name, signature)) == nullptr) ||
is_static != m->is_static()) {
ResourceMark rm(THREAD);
// NoSuchMethodException doesn't actually work because it tries to run the
@ -1081,7 +1081,7 @@ void Universe::initialize_verify_flags() {
char* save_ptr;
char* token = strtok_r(subset_list, delimiter, &save_ptr);
while (token != NULL) {
while (token != nullptr) {
if (strcmp(token, "threads") == 0) {
verify_flags |= Verify_Threads;
} else if (strcmp(token, "heap") == 0) {
@ -1109,7 +1109,7 @@ void Universe::initialize_verify_flags() {
} else {
vm_exit_during_initialization(err_msg("VerifySubSet: \'%s\' memory sub-system is unknown, please correct it", token));
}
token = strtok_r(NULL, delimiter, &save_ptr);
token = strtok_r(nullptr, delimiter, &save_ptr);
}
FREE_C_HEAP_ARRAY(char, subset_list);
}
@ -1249,7 +1249,7 @@ void LatestMethodCache::init(Klass* k, Method* m) {
#ifndef PRODUCT
else {
// sharing initialization should have already set up _klass
assert(_klass != NULL, "just checking");
assert(_klass != nullptr, "just checking");
}
#endif
@ -1259,10 +1259,10 @@ void LatestMethodCache::init(Klass* k, Method* m) {
Method* LatestMethodCache::get_method() {
if (klass() == NULL) return NULL;
if (klass() == nullptr) return nullptr;
InstanceKlass* ik = InstanceKlass::cast(klass());
Method* m = ik->method_with_idnum(method_idnum());
assert(m != NULL, "sanity check");
assert(m != nullptr, "sanity check");
return m;
}
@ -1271,16 +1271,16 @@ Method* LatestMethodCache::get_method() {
bool Universe::release_fullgc_alot_dummy() {
MutexLocker ml(FullGCALot_lock);
objArrayOop fullgc_alot_dummy_array = (objArrayOop)_fullgc_alot_dummy_array.resolve();
if (fullgc_alot_dummy_array != NULL) {
if (fullgc_alot_dummy_array != nullptr) {
if (_fullgc_alot_dummy_next >= fullgc_alot_dummy_array->length()) {
// No more dummies to release, release entire array instead
_fullgc_alot_dummy_array.release(Universe::vm_global());
_fullgc_alot_dummy_array = OopHandle(); // NULL out OopStorage pointer.
_fullgc_alot_dummy_array = OopHandle(); // null out OopStorage pointer.
return false;
}
// Release dummy at bottom of old generation
fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, nullptr);
}
return true;
}
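
initialize_verify_flags() above parses the comma-separated VerifySubSet list with strtok_r and ORs one flag bit per recognized keyword. A cut-down sketch of that parsing loop, using POSIX strtok_r/strdup, two made-up flag values and no VM error handling:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    enum { Verify_Threads = 1, Verify_Heap = 2 };

    static long parse_verify_flags(const char* option) {
      long flags = 0;
      char* subset_list = strdup(option);            // strtok_r modifies its input
      const char* delimiter = ",";
      char* save_ptr;
      char* token = strtok_r(subset_list, delimiter, &save_ptr);
      while (token != nullptr) {
        if (strcmp(token, "threads") == 0) {
          flags |= Verify_Threads;
        } else if (strcmp(token, "heap") == 0) {
          flags |= Verify_Heap;
        } else {
          std::fprintf(stderr, "unknown sub-system: %s\n", token);
        }
        token = strtok_r(nullptr, delimiter, &save_ptr);
      }
      std::free(subset_list);
      return flags;
    }

    int main() {
      std::printf("flags = %ld\n", parse_verify_flags("threads,heap"));
    }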

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,8 +56,8 @@ class LatestMethodCache : public CHeapObj<mtClass> {
int _method_idnum;
public:
LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
~LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
LatestMethodCache() { _klass = nullptr; _method_idnum = -1; }
~LatestMethodCache() { _klass = nullptr; _method_idnum = -1; }
void init(Klass* k, Method* m);
Klass* klass() const { return _klass; }
@ -185,7 +185,7 @@ class Universe: AllStatic {
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
assert(m != NULL, "mirror not initialized");
assert(m != nullptr, "mirror not initialized");
return m;
}
@ -226,7 +226,7 @@ class Universe: AllStatic {
static Klass* typeArrayKlassObj(BasicType t) {
assert((uint)t >= T_BOOLEAN, "range check for type: %s", type2name(t));
assert((uint)t < T_LONG+1, "range check for type: %s", type2name(t));
assert(_typeArrayKlassObjs[t] != NULL, "domain check");
assert(_typeArrayKlassObjs[t] != nullptr, "domain check");
return _typeArrayKlassObjs[t];
}
@ -322,7 +322,7 @@ class Universe: AllStatic {
DEBUG_ONLY(static bool is_gc_active();)
DEBUG_ONLY(static bool is_in_heap(const void* p);)
DEBUG_ONLY(static bool is_in_heap_or_null(const void* p) { return p == NULL || is_in_heap(p); })
DEBUG_ONLY(static bool is_in_heap_or_null(const void* p) { return p == nullptr || is_in_heap(p); })
// Reserve Java heap and determine CompressedOops mode
static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
// ReservedSpace
// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
_alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}
@ -50,7 +50,7 @@ ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();
initialize(size, alignment, page_size, NULL, false);
initialize(size, alignment, page_size, nullptr, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
@ -62,7 +62,7 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}
initialize(size, alignment, preferred_page_size, NULL, false);
initialize(size, alignment, preferred_page_size, nullptr, false);
}
ReservedSpace::ReservedSpace(size_t size,
@ -116,11 +116,11 @@ static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped
// Helper method
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
if (base == requested_address || requested_address == NULL) {
if (base == requested_address || requested_address == nullptr) {
return false; // did not fail
}
if (base != NULL) {
if (base != nullptr) {
// Different reserve address may be acceptable in other cases
// but for compressed oops heap should be at requested address.
assert(UseCompressedOops, "currently requested address used only for compressed oops");
@ -157,7 +157,7 @@ static char* reserve_memory(char* requested_address, const size_t size,
char* base;
// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid mapping over something
// important. If the reservation fails, return NULL.
// important. If the reservation fails, return null.
if (requested_address != 0) {
assert(is_aligned(requested_address, alignment),
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
@ -190,7 +190,7 @@ static char* reserve_memory_special(char* requested_address, const size_t size,
byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));
char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
if (base != NULL) {
if (base != nullptr) {
// Check alignment constraints.
assert(is_aligned(base, alignment),
"reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
@ -201,7 +201,7 @@ static char* reserve_memory_special(char* requested_address, const size_t size,
}
void ReservedSpace::clear_members() {
initialize_members(NULL, 0, 0, 0, false, false);
initialize_members(nullptr, 0, 0, 0, false, false);
}
void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
@ -235,7 +235,7 @@ void ReservedSpace::reserve(size_t size,
// large pages are allocated is up to the filesystem of the backing file.
// So UseLargePages is not taken into account for this reservation.
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
if (base != NULL) {
if (base != nullptr) {
initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
}
// Always return, not possible to fall back to reservation not using a file.
@ -250,7 +250,7 @@ void ReservedSpace::reserve(size_t size,
// no reservations are lost.
do {
char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
if (base != NULL) {
if (base != nullptr) {
// Successful reservation using large pages.
initialize_members(base, size, alignment, page_size, true, executable);
return;
@ -266,7 +266,7 @@ void ReservedSpace::reserve(size_t size,
// == Case 3 ==
char* base = reserve_memory(requested_address, size, alignment, -1, executable);
if (base != NULL) {
if (base != nullptr) {
// Successful mapping.
initialize_members(base, size, alignment, page_size, false, executable);
}
@ -395,7 +395,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
size_t alignment,
size_t page_size,
char* requested_address) {
if (_base != NULL) {
if (_base != nullptr) {
// We tried before, but we didn't like the address delivered.
release();
}
@ -436,7 +436,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
char* attach_point = highest_start;
while (attach_point >= lowest_start &&
attach_point <= highest_start && // Avoid wrap around.
((_base == NULL) ||
((_base == nullptr) ||
(_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
try_reserve_heap(size, alignment, page_size, attach_point);
attach_point -= stepsize;
@ -449,7 +449,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
// null terminated.
static char** get_attach_addresses_for_disjoint_mode() {
static uint64_t addresses[] = {
2 * SIZE_32G,
@ -522,7 +522,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
}
// Keep heap at HeapBaseMinAddress.
if (_base == NULL) {
if (_base == nullptr) {
// Try to allocate the heap at addresses that allow efficient oop compression.
// Different schemes are tried, in order of decreasing optimization potential.
@ -559,7 +559,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Give it several tries from top of range to bottom.
if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
((_base == NULL) || // No previous try succeeded.
((_base == nullptr) || // No previous try succeeded.
(_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
// Calc address range within which we try to attach (range of possible start addresses).
@ -584,7 +584,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
char** addresses = get_attach_addresses_for_disjoint_mode();
int i = 0;
while (addresses[i] && // End of array not yet reached.
((_base == NULL) || // No previous try succeeded.
((_base == nullptr) || // No previous try succeeded.
(_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
!CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
char* const attach_point = addresses[i];
@ -594,9 +594,9 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
}
// Last, desperate try without any placement.
if (_base == NULL) {
log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
initialize(size + noaccess_prefix, alignment, page_size, NULL, false);
if (_base == nullptr) {
log_trace(gc, heap, coops)("Trying to allocate at address nullptr heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
}
}
}
@ -607,7 +607,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
return;
}
if (heap_allocation_directory != NULL) {
if (heap_allocation_directory != nullptr) {
_fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
if (_fd_for_heap == -1) {
vm_exit_during_initialization(
@ -633,7 +633,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
establish_noaccess_prefix();
}
} else {
initialize(size, alignment, page_size, NULL, false);
initialize(size, alignment, page_size, nullptr, false);
}
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
@ -641,7 +641,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
if (base() != NULL) {
if (base() != nullptr) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}
@ -659,23 +659,23 @@ MemRegion ReservedHeapSpace::region() const {
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
size_t rs_page_size) : ReservedSpace() {
initialize(r_size, rs_align, rs_page_size, /*requested address*/ NULL, /*executable*/ true);
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
// VirtualSpace
VirtualSpace::VirtualSpace() {
_low_boundary = NULL;
_high_boundary = NULL;
_low = NULL;
_high = NULL;
_lower_high = NULL;
_middle_high = NULL;
_upper_high = NULL;
_lower_high_boundary = NULL;
_middle_high_boundary = NULL;
_upper_high_boundary = NULL;
_low_boundary = nullptr;
_high_boundary = nullptr;
_low = nullptr;
_high = nullptr;
_lower_high = nullptr;
_middle_high = nullptr;
_upper_high = nullptr;
_lower_high_boundary = nullptr;
_middle_high_boundary = nullptr;
_upper_high_boundary = nullptr;
_lower_alignment = 0;
_middle_alignment = 0;
_upper_alignment = 0;
@ -691,7 +691,7 @@ bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
if(!rs.is_reserved()) return false; // allocation failed.
assert(_low_boundary == NULL, "VirtualSpace already initialized");
assert(_low_boundary == nullptr, "VirtualSpace already initialized");
assert(max_commit_granularity > 0, "Granularity must be non-zero.");
_low_boundary = rs.base();
@ -745,16 +745,16 @@ VirtualSpace::~VirtualSpace() {
void VirtualSpace::release() {
// This does not release memory it reserved.
// Caller must release via rs.release();
_low_boundary = NULL;
_high_boundary = NULL;
_low = NULL;
_high = NULL;
_lower_high = NULL;
_middle_high = NULL;
_upper_high = NULL;
_lower_high_boundary = NULL;
_middle_high_boundary = NULL;
_upper_high_boundary = NULL;
_low_boundary = nullptr;
_high_boundary = nullptr;
_low = nullptr;
_high = nullptr;
_lower_high = nullptr;
_middle_high = nullptr;
_upper_high = nullptr;
_lower_high_boundary = nullptr;
_middle_high_boundary = nullptr;
_upper_high_boundary = nullptr;
_lower_alignment = 0;
_middle_alignment = 0;
_upper_alignment = 0;
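
try_reserve_range() above walks candidate attach points from highest_start down by stepsize, retrying until a reservation lands in the acceptable range or the candidates run out. A simplified standalone sketch of that loop; try_reserve() fakes the OS call and all of the sizes are invented:

    #include <cstdint>
    #include <cstdio>

    // Pretend the OS only honours attach requests below 28 GB.
    static uint64_t try_reserve(uint64_t wish) {
      return wish < 28ull * 1024 * 1024 * 1024 ? wish : 0;
    }

    int main() {
      const uint64_t GB = 1024ull * 1024 * 1024;
      const uint64_t lowest = 4 * GB, highest = 32 * GB, stepsize = 4 * GB;
      const uint64_t size = 2 * GB, upper_bound = 32 * GB;
      uint64_t base = 0;

      for (uint64_t attach = highest;
           attach >= lowest &&                       // candidates left
           (base == 0 || base + size > upper_bound); // no acceptable reservation yet
           attach -= stepsize) {
        base = try_reserve(attach);
      }
      std::printf("reserved at %#llx\n", (unsigned long long)base);
    }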

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ class ReservedSpace {
// aligned up to the final alignment in this case.
ReservedSpace(size_t size, size_t preferred_page_size);
ReservedSpace(size_t size, size_t alignment, size_t page_size,
char* requested_address = NULL);
char* requested_address = nullptr);
// Accessors
char* base() const { return _base; }
@ -88,7 +88,7 @@ class ReservedSpace {
bool special() const { return _special; }
bool executable() const { return _executable; }
size_t noaccess_prefix() const { return _noaccess_prefix; }
bool is_reserved() const { return _base != NULL; }
bool is_reserved() const { return _base != nullptr; }
void release();
// Splitting
@ -135,7 +135,7 @@ class ReservedHeapSpace : public ReservedSpace {
// Constructor. Tries to find a heap that is good for compressed oops.
// heap_allocation_directory is the path to the backing memory for Java heap. When set, Java heap will be allocated
// on the device which is managed by the file system where the directory resides.
ReservedHeapSpace(size_t size, size_t forced_base_alignment, size_t page_size, const char* heap_allocation_directory = NULL);
ReservedHeapSpace(size_t size, size_t forced_base_alignment, size_t page_size, const char* heap_allocation_directory = nullptr);
// Returns the base to be used for compression, i.e. so that null can be
// encoded safely and implicit null checks can work.
char *compressed_oop_base() const { return _base - _noaccess_prefix; }
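
compressed_oop_base() above hands out _base minus the noaccess prefix so that a narrow oop can be decoded unconditionally as base + (narrow << shift): a narrow value of zero then resolves into the protected prefix and faults on access, which is what lets implicit null checks work. A worked toy example with invented addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t noaccess_prefix = 16ull * 1024 * 1024;   // mapped inaccessible in reality
      const uint64_t heap_start      = 0x800000000ull;        // first real heap address
      const uint64_t base            = heap_start - noaccess_prefix;
      const int      shift           = 3;                     // 8-byte object alignment

      auto encode = [&](uint64_t addr)   { return (uint32_t)((addr - base) >> shift); };
      auto decode = [&](uint32_t narrow) { return base + ((uint64_t)narrow << shift); };  // no null branch

      uint64_t oop = heap_start + 0x1000;
      std::printf("narrow=%#x decoded=%#llx, narrow 0 decodes to %#llx (inside the prefix)\n",
                  encode(oop), (unsigned long long)decode(encode(oop)),
                  (unsigned long long)decode(0));
    }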