8301179: Replace NULL with nullptr in share/gc/serial/

Reviewed-by: tschatzl, kbarrett
Johan Sjölen 2023-01-27 09:36:23 +00:00
parent b77abc6a0d
commit 107e184d59
7 changed files with 44 additions and 44 deletions
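
Background note (not part of the commit message): NULL in C++ expands to an integer constant, so it can steer overload resolution toward integer overloads or make a call ambiguous, whereas nullptr has its own type, std::nullptr_t, which converts to pointer types but never to int. A minimal stand-alone sketch of the difference; the overloaded function is hypothetical, not HotSpot code:

    // nullptr_demo.cpp -- why nullptr is preferred over NULL
    static int overloaded(int)  { return 1; }  // NULL (an integer 0/0L) can bind here, or be ambiguous
    static int overloaded(int*) { return 2; }  // nullptr always selects this pointer overload

    int main() {
      return overloaded(nullptr);  // returns 2: std::nullptr_t converts to pointers, never to int
    }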

src/hotspot/share/gc/serial/defNewGeneration.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,7 +127,7 @@ void CLDScanClosure::do_cld(ClassLoaderData* cld) {
     // Clean the cld since we're going to scavenge all the metadata.
     cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
-    _scavenge_closure->set_scanned_cld(NULL);
+    _scavenge_closure->set_scanned_cld(nullptr);
   }
 }
@@ -181,7 +181,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
-  _old_gen = NULL;
+  _old_gen = nullptr;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
@@ -271,7 +271,7 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
   // The to-space is normally empty before a compaction so need
   // not be considered. The exception is during promotion
   // failure handling when to-space can contain live objects.
-  from()->set_next_compaction_space(NULL);
+  from()->set_next_compaction_space(nullptr);
 }
 
 void DefNewGeneration::swap_spaces() {
@@ -282,7 +282,7 @@ void DefNewGeneration::swap_spaces() {
   // The to-space is normally empty before a compaction so need
   // not be considered. The exception is during promotion
   // failure handling when to-space can contain live objects.
-  from()->set_next_compaction_space(NULL);
+  from()->set_next_compaction_space(nullptr);
 
   if (UsePerfData) {
     CSpaceCounters* c = _from_counters;
@@ -482,7 +482,7 @@ HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   // again later with the Heap_lock held.
   bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
 
-  HeapWord* result = NULL;
+  HeapWord* result = nullptr;
   if (do_alloc) {
     result = from()->allocate(size);
   }
@@ -495,7 +495,7 @@ HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   from()->free(),
   should_try_alloc ? "" : " should_allocate_from_space: NOT",
   do_alloc ? " Heap_lock is not owned by self" : "",
-  result == NULL ? "NULL" : "object");
+  result == nullptr ? "null" : "object");
 
   return result;
 }
@@ -546,7 +546,7 @@ void DefNewGeneration::collect(bool full,
   init_assuming_no_promotion_failure();
 
-  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());
+  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());
 
   heap->trace_heap_before_gc(_gc_tracer);
@@ -664,7 +664,7 @@ void DefNewGeneration::collect(bool full,
 
 void DefNewGeneration::init_assuming_no_promotion_failure() {
   _promotion_failed = false;
   _promotion_failed_info.reset();
-  from()->set_next_compaction_space(NULL);
+  from()->set_next_compaction_space(nullptr);
 }
 
 void DefNewGeneration::remove_forwarding_pointers() {
@@ -687,7 +687,7 @@ void DefNewGeneration::remove_forwarding_pointers() {
 }
 
 void DefNewGeneration::restore_preserved_marks() {
-  _preserved_marks_set.restore(NULL);
+  _preserved_marks_set.restore(nullptr);
 }
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
@@ -716,7 +716,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   assert(is_in_reserved(old) && !old->is_forwarded(),
          "shouldn't be scavenging this oop");
   size_t s = old->size();
-  oop obj = NULL;
+  oop obj = nullptr;
 
   // Try allocating obj in to-space (unless too old)
   if (old->age() < tenuring_threshold()) {
@@ -725,9 +725,9 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   bool new_obj_is_tenured = false;
   // Otherwise try allocating obj tenured
-  if (obj == NULL) {
+  if (obj == nullptr) {
     obj = _old_gen->promote(old, s);
-    if (obj == NULL) {
+    if (obj == nullptr) {
       handle_promotion_failure(old);
       return old;
     }
@@ -816,7 +816,7 @@ bool DefNewGeneration::collection_attempt_is_safe() {
     log_trace(gc)(":: to is not empty ::");
     return false;
   }
-  if (_old_gen == NULL) {
+  if (_old_gen == nullptr) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
     _old_gen = gch->old_gen();
   }
@@ -926,7 +926,7 @@ HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
   // Note that since DefNewGeneration supports lock-free allocation, we
   // have to use it here, as well.
   HeapWord* result = eden()->par_allocate(word_size);
-  if (result == NULL) {
+  if (result == nullptr) {
     // If the eden is full and the last collection bailed out, we are running
     // out of heap space, and we try to allocate the from-space, too.
     // allocate_from_space can't be inlined because that would introduce a

src/hotspot/share/gc/serial/genMarkSweep.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,8 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs
 #endif
 
   // hook up weak ref data so it can be used during Mark-Sweep
-  assert(ref_processor() == NULL, "no stomping");
-  assert(rp != NULL, "should be non-NULL");
+  assert(ref_processor() == nullptr, "no stomping");
+  assert(rp != nullptr, "should be non-null");
   set_ref_processor(rp);
 
   gch->trace_heap_before_gc(_gc_tracer);
@@ -134,7 +134,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs
   gch->prune_scavengable_nmethods();
 
   // refs processing: clean slate
-  set_ref_processor(NULL);
+  set_ref_processor(nullptr);
 
   // Update heap occupancy information which is used as
   // input to soft ref clearing policy at the next gc.
@@ -153,7 +153,7 @@ void GenMarkSweep::allocate_stacks() {
   // $$$ To cut a corner, we'll only use the first scratch block, and then
   // revert to malloc.
-  if (scratch != NULL) {
+  if (scratch != nullptr) {
     _preserved_count_max =
       scratch->num_words * HeapWordSize / sizeof(PreservedMark);
   } else {
@@ -185,7 +185,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   {
     StrongRootsScope srs(0);
 
-    CLDClosure* weak_cld_closure = ClassUnloading ? NULL : &follow_cld_closure;
+    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
     MarkingCodeBlobClosure mark_code_closure(&follow_root_closure, !CodeBlobToOopClosure::FixRelocations, true);
     gch->process_roots(GenCollectedHeap::SO_None,
                        &follow_root_closure,

src/hotspot/share/gc/serial/markSweep.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,12 +52,12 @@ Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack;
 Stack<PreservedMark, mtGC> MarkSweep::_preserved_overflow_stack;
 size_t MarkSweep::_preserved_count = 0;
 size_t MarkSweep::_preserved_count_max = 0;
-PreservedMark* MarkSweep::_preserved_marks = NULL;
-ReferenceProcessor* MarkSweep::_ref_processor = NULL;
-STWGCTimer* MarkSweep::_gc_timer = NULL;
-SerialOldTracer* MarkSweep::_gc_tracer = NULL;
+PreservedMark* MarkSweep::_preserved_marks = nullptr;
+ReferenceProcessor* MarkSweep::_ref_processor = nullptr;
+STWGCTimer* MarkSweep::_gc_timer = nullptr;
+SerialOldTracer* MarkSweep::_gc_tracer = nullptr;
 
-StringDedup::Requests* MarkSweep::_string_dedup_requests = NULL;
+StringDedup::Requests* MarkSweep::_string_dedup_requests = nullptr;
 
 MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;

src/hotspot/share/gc/serial/blockOffsetTable.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
 
 BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                                size_t init_word_size):
-  _reserved(reserved), _end(NULL)
+  _reserved(reserved), _end(nullptr)
 {
   size_t size = compute_size(reserved.word_size());
   ReservedSpace rs(size);
@@ -232,7 +232,7 @@ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const
 
 void
 BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
-  assert(blk_start != NULL && blk_end > blk_start,
+  assert(blk_start != nullptr && blk_end > blk_start,
          "phantom block");
   single_block(blk_start, blk_end);
 }
@@ -299,11 +299,11 @@ void BlockOffsetArray::verify() const {
   size_t next_index = 1;
   size_t last_index = last_active_index();
 
-  // Use for debugging. Initialize to NULL to distinguish the
+  // Use for debugging. Initialize to null to distinguish the
   // first iteration through the while loop.
-  HeapWord* last_p = NULL;
-  HeapWord* last_start = NULL;
-  oop last_o = NULL;
+  HeapWord* last_p = nullptr;
+  HeapWord* last_start = nullptr;
+  oop last_o = nullptr;
 
   while (next_index <= last_index) {
     // Use an address past the start of the address for
@@ -387,7 +387,7 @@ HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const
 
 void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
                                                    HeapWord* blk_end) {
-  assert(blk_start != NULL && blk_end > blk_start,
+  assert(blk_start != nullptr && blk_end > blk_start,
          "phantom block");
   assert(blk_end > _next_offset_threshold,
          "should be past threshold");

src/hotspot/share/gc/serial/blockOffsetTable.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -336,7 +336,7 @@ class BlockOffsetArray: public BlockOffsetTable {
   // Corresponding setter
   void set_init_to_zero(bool val) {
     _init_to_zero = val;
-    assert(_array != NULL, "_array should be non-NULL");
+    assert(_array != nullptr, "_array should be non-null");
     _array->set_init_to_zero(val);
   }
@@ -366,7 +366,7 @@ class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  public:
   BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
     BlockOffsetArray(array, mr, true) {
-    _next_offset_threshold = NULL;
+    _next_offset_threshold = nullptr;
     _next_offset_index = 0;
   }
@@ -384,7 +384,7 @@ class BlockOffsetArrayContigSpace: public BlockOffsetArray {
   // In general, these methods expect to be called with
   // [blk_start, blk_end) representing a block of memory in the heap.
   // In this implementation, however, we are OK even if blk_start and/or
-  // blk_end are NULL because NULL is represented as 0, and thus
+  // blk_end are null because null is represented as 0, and thus
   // never exceeds the "_next_offset_threshold".
   void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
     if (blk_end > _next_offset_threshold) {

src/hotspot/share/gc/serial/blockOffsetTable.inline.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@ inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
   if (addr >= _bottom && addr < _end) {
     return block_start_unsafe(addr);
   } else {
-    return NULL;
+    return nullptr;
   }
 }

src/hotspot/share/gc/serial/serialHeap.cpp

@@ -42,9 +42,9 @@ SerialHeap::SerialHeap() :
     GenCollectedHeap(Generation::DefNew,
                      Generation::MarkSweepCompact,
                      "Copy:MSC"),
-    _eden_pool(NULL),
-    _survivor_pool(NULL),
-    _old_pool(NULL) {
+    _eden_pool(nullptr),
+    _survivor_pool(nullptr),
+    _old_pool(nullptr) {
   _young_manager = new GCMemoryManager("Copy", "end of minor GC");
   _old_manager = new GCMemoryManager("MarkSweepCompact", "end of major GC");
 }