8301180: Replace NULL with nullptr in share/gc/parallel/
Reviewed-by: stefank, ayang
parent d7aa87fdbd
commit 3758487fda
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,7 @@ GCAdaptivePolicyCounters::GCAdaptivePolicyCounters(const char* name,
}

void GCAdaptivePolicyCounters::update_counters_from_policy() {
- if (UsePerfData && (size_policy() != NULL)) {
+ if (UsePerfData && (size_policy() != nullptr)) {
update_avg_minor_pause_counter();
update_avg_minor_interval_counter();
#ifdef NOT_PRODUCT
@@ -168,7 +168,7 @@ size_t MutableNUMASpace::free_in_words() const {


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
- guarantee(thr != NULL, "No thread");
+ guarantee(thr != nullptr, "No thread");
int lgrp_id = thr->lgrp_id();
if (lgrp_id == -1) {
// This case can occur after the topology of the system has
@@ -193,7 +193,7 @@ size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {

size_t MutableNUMASpace::tlab_used(Thread *thr) const {
// Please see the comments for tlab_capacity().
- guarantee(thr != NULL, "No thread");
+ guarantee(thr != nullptr, "No thread");
int lgrp_id = thr->lgrp_id();
if (lgrp_id == -1) {
if (lgrp_spaces()->length() > 0) {
@@ -213,7 +213,7 @@ size_t MutableNUMASpace::tlab_used(Thread *thr) const {

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
// Please see the comments for tlab_capacity().
- guarantee(thr != NULL, "No thread");
+ guarantee(thr != nullptr, "No thread");
int lgrp_id = thr->lgrp_id();
if (lgrp_id == -1) {
if (lgrp_spaces()->length() > 0) {
@@ -587,8 +587,8 @@ void MutableNUMASpace::initialize(MemRegion mr,
if (!old_region.equals(region())) {
new_region = MemRegion(rounded_bottom, rounded_end);
MemRegion intersection = new_region.intersection(old_region);
- if (intersection.start() == NULL ||
- intersection.end() == NULL ||
+ if (intersection.start() == nullptr ||
+ intersection.end() == nullptr ||
prev_page_size > page_size()) { // If the page size got smaller we have to change
// the page size preference for the whole space.
intersection = MemRegion(new_region.start(), new_region.start());
@@ -663,7 +663,7 @@ void MutableNUMASpace::initialize(MemRegion mr,

MemRegion intersection = old_region.intersection(new_region);

- if (intersection.start() == NULL || intersection.end() == NULL) {
+ if (intersection.start() == nullptr || intersection.end() == nullptr) {
intersection = MemRegion(new_region.start(), new_region.start());
}

@@ -783,19 +783,19 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
LGRPSpace *ls = lgrp_spaces()->at(i);
MutableSpace *s = ls->space();
HeapWord *p = s->cas_allocate(size);
- if (p != NULL) {
+ if (p != nullptr) {
size_t remainder = pointer_delta(s->end(), p + size);
if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
if (s->cas_deallocate(p, size)) {
// We were the last to allocate and created a fragment less than
// a minimal object.
- p = NULL;
+ p = nullptr;
} else {
guarantee(false, "Deallocation should always succeed");
}
}
}
- if (p != NULL) {
+ if (p != nullptr) {
HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
@@ -805,12 +805,12 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
}

// Make the page allocation happen here if there is no static binding.
- if (p != NULL && !os::numa_has_static_binding() ) {
+ if (p != nullptr && !os::numa_has_static_binding() ) {
for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
*(int*)i = 0;
}
}
- if (p == NULL) {
+ if (p == nullptr) {
ls->set_allocation_failed();
}
return p;
@@ -911,7 +911,7 @@ void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count
char *s = scan_start;
while (s < scan_end) {
char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
- if (e == NULL) {
+ if (e == nullptr) {
break;
}
if (e != scan_end) {
@@ -88,7 +88,7 @@ class MutableNUMASpace : public MutableSpace {
char* last_page_scanned() { return _last_page_scanned; }
void set_last_page_scanned(char* p) { _last_page_scanned = p; }
public:
- LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _allocation_failed(false), _last_page_scanned(NULL) {
+ LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _allocation_failed(false), _last_page_scanned(nullptr) {
_space = new MutableSpace(alignment);
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
}
@@ -198,7 +198,7 @@ class MutableNUMASpace : public MutableSpace {
bool clear_space,
bool mangle_space,
bool setup_pages = SetupPages,
- WorkerThreads* pretouch_workers = NULL);
+ WorkerThreads* pretouch_workers = nullptr);
// Update space layout if necessary. Do all adaptive resizing job.
virtual void update();
// Update allocation rate averages.
@@ -223,7 +223,7 @@ class MutableNUMASpace : public MutableSpace {
virtual size_t tlab_used(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;

- // Allocation (return NULL if full)
+ // Allocation (return null if full)
virtual HeapWord* cas_allocate(size_t word_size);

// Debugging
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,12 +36,12 @@
#include "utilities/macros.hpp"

MutableSpace::MutableSpace(size_t alignment) :
- _mangler(NULL),
+ _mangler(nullptr),
_last_setup_region(),
_alignment(alignment),
- _bottom(NULL),
- _top(NULL),
- _end(NULL)
+ _bottom(nullptr),
+ _top(nullptr),
+ _end(nullptr)
{
assert(MutableSpace::alignment() % os::vm_page_size() == 0,
"Space should be aligned");
@@ -203,7 +203,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
"checking alignment");
return obj;
} else {
- return NULL;
+ return nullptr;
}
} while (true);
}
@@ -256,7 +256,7 @@ void MutableSpace::print_on(outputStream* st) const {
void MutableSpace::verify() {
HeapWord* p = bottom();
HeapWord* t = top();
- HeapWord* prev_p = NULL;
+ HeapWord* prev_p = nullptr;
while (p < t) {
oopDesc::verify(cast_to_oop(p));
prev_p = p;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,7 @@ class MutableSpace: public CHeapObj<mtGC> {
bool clear_space,
bool mangle_space,
bool setup_pages = SetupPages,
- WorkerThreads* pretouch_workers = NULL);
+ WorkerThreads* pretouch_workers = nullptr);

virtual void clear(bool mangle_space);
virtual void update() { }
@@ -137,7 +137,7 @@ class MutableSpace: public CHeapObj<mtGC> {
virtual size_t tlab_used(Thread* thr) const { return used_in_bytes(); }
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes(); }

- // Allocation (return NULL if full)
+ // Allocation (return null if full)
virtual HeapWord* cas_allocate(size_t word_size);
// Optional deallocation. Used in NUMA-allocator.
bool cas_deallocate(HeapWord *obj, size_t size);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

_virtual_space = new PSVirtualSpace(rs, page_sz);
- if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
+ if (_virtual_space != nullptr && _virtual_space->expand_by(_reserved_byte_size)) {
_region_start = covered_region.start();
_region_size = covered_region.word_size();
BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
@@ -68,9 +68,9 @@ ParMarkBitMap::initialize(MemRegion covered_region)

_region_start = 0;
_region_size = 0;
- if (_virtual_space != NULL) {
+ if (_virtual_space != nullptr) {
delete _virtual_space;
- _virtual_space = NULL;
+ _virtual_space = nullptr;
// Release memory reserved in the space.
rs.release();
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
#include "utilities/bitMap.inline.hpp"

inline ParMarkBitMap::ParMarkBitMap():
- _region_start(NULL), _region_size(0), _beg_bits(), _end_bits(), _virtual_space(NULL), _reserved_byte_size(0)
+ _region_start(nullptr), _region_size(0), _beg_bits(), _end_bits(), _virtual_space(nullptr), _reserved_byte_size(0)
{ }

inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end) {
@@ -56,10 +56,10 @@
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

- PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
- PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
- PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
- PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
+ PSYoungGen* ParallelScavengeHeap::_young_gen = nullptr;
+ PSOldGen* ParallelScavengeHeap::_old_gen = nullptr;
+ PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
+ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

jint ParallelScavengeHeap::initialize() {
const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
@@ -283,7 +283,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
uint gc_count = 0;
uint gclocker_stalled_count = 0;

- while (result == NULL) {
+ while (result == nullptr) {
// We don't want to have multiple collections for a single filled generation.
// To prevent this, each thread tracks the total_collections() value, and if
// the count has changed, does not do a new collection.
@@ -300,18 +300,18 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
gc_count = total_collections();

result = young_gen()->allocate(size);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}

// If certain conditions hold, try allocating from the old gen.
result = mem_allocate_old_gen(size);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}

if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
- return NULL;
+ return nullptr;
}

// Failed to allocate without a gc.
@@ -333,12 +333,12 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
- return NULL;
+ return nullptr;
}
}
}

- if (result == NULL) {
+ if (result == nullptr) {
// Generate a VM operation
VM_ParallelGCFailedAllocation op(size, gc_count);
VMThread::execute(&op);
@@ -352,16 +352,16 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// If GC was locked out during VM operation then retry allocation
// and/or stall as necessary.
if (op.gc_locked()) {
- assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+ assert(op.result() == nullptr, "must be null if gc_locked() is true");
continue; // retry and/or stall as necessary
}

// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above ("result" guarding
- // this path is NULL) and the most recent collection has exceeded the
+ // this path is null) and the most recent collection has exceeded the
// gc overhead limit (although enough may have been collected to
// satisfy the allocation). Exit the loop so that an out-of-memory
- // will be thrown (return a NULL ignoring the contents of
+ // will be thrown (return a null ignoring the contents of
// op.result()),
// but clear gc_overhead_limit_exceeded so that the next collection
// starts with a clean slate (i.e., forgets about previous overhead
@@ -373,11 +373,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
- log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
- if (op.result() != NULL) {
+ log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
+ if (op.result() != nullptr) {
CollectedHeap::fill_with_object(op.result(), size);
}
- return NULL;
+ return nullptr;
}

return op.result();
@@ -387,7 +387,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// The policy object will prevent us from looping forever. If the
// time spent in gc crosses a threshold, we will bail out.
loop_count++;
- if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
+ if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
log_warning(gc)("\tsize=" SIZE_FORMAT, size);
@@ -405,7 +405,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
- if (addr != NULL) {
+ if (addr != nullptr) {
_death_march_count = 0; // death march has ended
} else if (_death_march_count == 0) {
if (should_alloc_in_eden(size)) {
@@ -417,7 +417,7 @@ ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = old_gen()->allocate(size);
- if (res != NULL) {
+ if (res != nullptr) {
_size_policy->tenured_allocation(size * HeapWordSize);
}
return res;
@@ -439,7 +439,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
_death_march_count = 0;
}
}
- return NULL;
+ return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
@@ -471,7 +471,7 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {

// Second level allocation failure.
// Mark sweep and allocate in young generation.
- if (result == NULL && !invoked_full_gc) {
+ if (result == nullptr && !invoked_full_gc) {
do_full_collection(false);
result = young_gen()->allocate(size);
}
@@ -481,20 +481,20 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
// Third level allocation failure.
// After mark sweep and young generation allocation failure,
// allocate in old generation.
- if (result == NULL) {
+ if (result == nullptr) {
result = allocate_old_gen_and_record(size);
}

// Fourth level allocation failure. We're running out of memory.
// More complete mark sweep and allocate in young generation.
- if (result == NULL) {
+ if (result == nullptr) {
do_full_collection(true);
result = young_gen()->allocate(size);
}

// Fifth level allocation failure.
// After more complete mark sweep, allocate in old generation.
- if (result == NULL) {
+ if (result == nullptr) {
result = allocate_old_gen_and_record(size);
}

@@ -520,7 +520,7 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
HeapWord* result = young_gen()->allocate(requested_size);
- if (result != NULL) {
+ if (result != nullptr) {
*actual_size = requested_size;
}

@@ -628,7 +628,7 @@ HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
assert(young_gen()->is_in(addr),
"addr should be in allocated part of young gen");
// called from os::print_location by find or VMError
- if (Debugging || VMError::is_error_reported()) return NULL;
+ if (Debugging || VMError::is_error_reported()) return nullptr;
Unimplemented();
} else if (old_gen()->is_in_reserved(addr)) {
assert(old_gen()->is_in(addr),
@@ -676,10 +676,10 @@ bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
- if (young_gen() != NULL) {
+ if (young_gen() != nullptr) {
young_gen()->print_on(st);
}
- if (old_gen() != NULL) {
+ if (old_gen() != nullptr) {
old_gen()->print_on(st);
}
MetaspaceUtils::print_on(st);
@@ -112,11 +112,11 @@ class ParallelScavengeHeap : public CollectedHeap {
ParallelScavengeHeap() :
CollectedHeap(),
_death_march_count(0),
- _young_manager(NULL),
- _old_manager(NULL),
- _eden_pool(NULL),
- _survivor_pool(NULL),
- _old_pool(NULL),
+ _young_manager(nullptr),
+ _old_manager(nullptr),
+ _eden_pool(nullptr),
+ _survivor_pool(nullptr),
+ _old_pool(nullptr),
_workers("GC Thread", ParallelGCThreads) { }

// For use by VM operations
@@ -192,7 +192,7 @@ class ParallelScavengeHeap : public CollectedHeap {
// Memory allocation. "gc_time_limit_was_exceeded" will
// be set to true if the adaptive size policy determine that
// an excessive amount of time is being spent doing collections
- // and caused a NULL to be returned. If a NULL is not returned,
+ // and caused a null to be returned. If a null is not returned,
// "gc_time_limit_was_exceeded" has an undefined meaning.
HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@ class CheckForUnmarkedOops : public BasicOopIterateClosure {
if (_young_gen->is_in_reserved(obj) &&
!_card_table->addr_is_marked_imprecise(p)) {
// Don't overwrite the first missing card mark
- if (_unmarked_addr == NULL) {
+ if (_unmarked_addr == nullptr) {
_unmarked_addr = (HeapWord*)p;
}
}
@@ -57,13 +57,13 @@ class CheckForUnmarkedOops : public BasicOopIterateClosure {

public:
CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) :
- _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+ _young_gen(young_gen), _card_table(card_table), _unmarked_addr(nullptr) { }

virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

bool has_unmarked_oop() {
- return _unmarked_addr != NULL;
+ return _unmarked_addr != nullptr;
}
};

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@ private:
// pointing to the young generation after being scanned.
ClassLoaderData* _scanned_cld;
public:
- PSScavengeFromCLDClosure(PSPromotionManager* pm) : _pm(pm), _scanned_cld(NULL) { }
+ PSScavengeFromCLDClosure(PSPromotionManager* pm) : _pm(pm), _scanned_cld(nullptr) { }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
void do_oop(oop* p) {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
@@ -104,13 +104,13 @@ public:
}

void set_scanned_cld(ClassLoaderData* cld) {
- assert(_scanned_cld == NULL || cld == NULL, "Should always only handling one cld at a time");
+ assert(_scanned_cld == nullptr || cld == nullptr, "Should always only handling one cld at a time");
_scanned_cld = cld;
}

private:
void do_cld_barrier() {
- assert(_scanned_cld != NULL, "Should not be called without having a scanned cld");
+ assert(_scanned_cld != nullptr, "Should not be called without having a scanned cld");
_scanned_cld->record_modified_oops();
}
};
@@ -133,7 +133,7 @@ public:
// Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

- _oop_closure.set_scanned_cld(NULL);
+ _oop_closure.set_scanned_cld(nullptr);
}
}
};
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,17 +39,17 @@
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

- PSOldGen* ParCompactionManager::_old_gen = NULL;
- ParCompactionManager** ParCompactionManager::_manager_array = NULL;
+ PSOldGen* ParCompactionManager::_old_gen = nullptr;
+ ParCompactionManager** ParCompactionManager::_manager_array = nullptr;

- ParCompactionManager::OopTaskQueueSet* ParCompactionManager::_oop_task_queues = NULL;
- ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = NULL;
- ParCompactionManager::RegionTaskQueueSet* ParCompactionManager::_region_task_queues = NULL;
+ ParCompactionManager::OopTaskQueueSet* ParCompactionManager::_oop_task_queues = nullptr;
+ ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = nullptr;
+ ParCompactionManager::RegionTaskQueueSet* ParCompactionManager::_region_task_queues = nullptr;

- ObjectStartArray* ParCompactionManager::_start_array = NULL;
- ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
- GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
- Monitor* ParCompactionManager::_shadow_region_monitor = NULL;
+ ObjectStartArray* ParCompactionManager::_start_array = nullptr;
+ ParMarkBitMap* ParCompactionManager::_mark_bitmap = nullptr;
+ GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = nullptr;
+ Monitor* ParCompactionManager::_shadow_region_monitor = nullptr;

ParCompactionManager::ParCompactionManager() {

@@ -64,14 +64,14 @@ ParCompactionManager::ParCompactionManager() {
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
- assert(ParallelScavengeHeap::heap() != NULL,
+ assert(ParallelScavengeHeap::heap() != nullptr,
"Needed for initialization");

_mark_bitmap = mbm;

uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();

- assert(_manager_array == NULL, "Attempt to initialize twice");
+ assert(_manager_array == nullptr, "Attempt to initialize twice");
_manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads, mtGC);

_oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
@@ -111,7 +111,7 @@ void ParCompactionManager::flush_all_string_dedup_requests() {
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
assert(index < ParallelGCThreads, "index out of range");
- assert(_manager_array != NULL, "Sanity");
+ assert(_manager_array != nullptr, "Sanity");
return _manager_array[index];
}

@@ -170,7 +170,7 @@ void ParCompactionManager::drain_region_stacks() {
void ParCompactionManager::drain_deferred_objects() {
while (!_deferred_obj_array->is_empty()) {
HeapWord* addr = _deferred_obj_array->pop();
- assert(addr != NULL, "expected a deferred object");
+ assert(addr != nullptr, "expected a deferred object");
PSParallelCompact::update_deferred_object(this, addr);
}
_deferred_obj_array->clear_and_deallocate();
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -133,8 +133,8 @@ class ParCompactionManager : public CHeapObj<mtGC> {
void push_deferred_object(HeapWord* addr);

void reset_bitmap_query_cache() {
- _last_query_beg = NULL;
- _last_query_obj = NULL;
+ _last_query_beg = nullptr;
+ _last_query_obj = nullptr;
_last_query_ret = 0;
}

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -141,7 +141,7 @@ inline void follow_array_specialized(objArrayOop obj, int index, ParCompactionMa
cm->push_objarray(obj, end_index); // Push the continuation.
}

- // Push the non-NULL elements of the next stride on the marking stack.
+ // Push the non-null elements of the next stride on the marking stack.
for (T* e = beg; e < end; e++) {
cm->mark_and_push<T>(e);
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@ class PSGenerationCounters: public GenerationCounters {
size_t min_capacity, size_t max_capacity, PSVirtualSpace* v);

void update_all() {
- assert(_virtual_space == NULL, "Only one should be in use");
+ assert(_virtual_space == nullptr, "Only one should be in use");
_current_size->set_value(_ps_virtual_space->committed_size());
}
};
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -348,7 +348,7 @@ void PSOldGen::post_resize() {
ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

WorkerThreads* workers = Thread::current()->is_VM_thread() ?
- &ParallelScavengeHeap::heap()->workers() : NULL;
+ &ParallelScavengeHeap::heap()->workers() : nullptr;

// The update of the space's end is done by this call. As that
// makes the new space available for concurrent allocation, this
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@ class PSOldGen : public CHeapObj<mtGC> {
HeapWord* cas_allocate_noexpand(size_t word_size) {
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = object_space()->cas_allocate(word_size);
- if (res != NULL) {
+ if (res != nullptr) {
DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
_start_array.allocate_block(res);
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@ ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
- ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
+ ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
@@ -151,15 +151,15 @@ void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
{
assert(src_region_idx != 0, "invalid src_region_idx");
assert(partial_obj_size != 0, "invalid partial_obj_size argument");
- assert(destination != NULL, "invalid destination argument");
+ assert(destination != nullptr, "invalid destination argument");

_src_region_idx = src_region_idx;
_partial_obj_size = partial_obj_size;
_destination = destination;

// These fields may not be updated below, so make sure they're clear.
- assert(_dest_region_addr == NULL, "should have been cleared");
- assert(_first_src_addr == NULL, "should have been cleared");
+ assert(_dest_region_addr == nullptr, "should have been cleared");
+ assert(_first_src_addr == nullptr, "should have been cleared");

// Determine the number of destination regions for the partial object.
HeapWord* const last_word = destination + partial_obj_size - 1;
@@ -192,10 +192,10 @@ void SplitInfo::clear()
{
_src_region_idx = 0;
_partial_obj_size = 0;
- _destination = NULL;
+ _destination = nullptr;
_destination_count = 0;
- _dest_region_addr = NULL;
- _first_src_addr = NULL;
+ _dest_region_addr = nullptr;
+ _first_src_addr = nullptr;
assert(!is_valid(), "sanity");
}

@@ -204,10 +204,10 @@ void SplitInfo::verify_clear()
{
assert(_src_region_idx == 0, "not clear");
assert(_partial_obj_size == 0, "not clear");
- assert(_destination == NULL, "not clear");
+ assert(_destination == nullptr, "not clear");
assert(_destination_count == 0, "not clear");
- assert(_dest_region_addr == NULL, "not clear");
- assert(_first_src_addr == NULL, "not clear");
+ assert(_dest_region_addr == nullptr, "not clear");
+ assert(_first_src_addr == nullptr, "not clear");
}
#endif // #ifdef ASSERT

@@ -414,14 +414,14 @@ print_initial_summary_data(ParallelCompactData& summary_data,
#endif // #ifndef PRODUCT

ParallelCompactData::ParallelCompactData() :
- _region_start(NULL),
- DEBUG_ONLY(_region_end(NULL) COMMA)
- _region_vspace(NULL),
+ _region_start(nullptr),
+ DEBUG_ONLY(_region_end(nullptr) COMMA)
+ _region_vspace(nullptr),
_reserved_byte_size(0),
- _region_data(NULL),
+ _region_data(nullptr),
_region_count(0),
- _block_vspace(NULL),
- _block_data(NULL),
+ _block_vspace(nullptr),
+ _block_data(nullptr),
_block_count(0) {}

bool ParallelCompactData::initialize(MemRegion covered_region)
@@ -692,7 +692,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord* target_beg, HeapWord* target_end,
HeapWord** target_next)
{
- HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
+ HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
log_develop_trace(gc, compaction)(
"sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
"tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
@@ -713,7 +713,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
// at which the source space can be 'split' so that part is copied to the
// target space and the rest is copied elsewhere.
if (dest_addr + words > target_end) {
- assert(source_next != NULL, "source_next is NULL when splitting");
+ assert(source_next != nullptr, "source_next is null when splitting");
*source_next = summarize_split_space(cur_region, split_info, dest_addr,
target_end, target_next);
return false;
@@ -775,7 +775,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
- assert(addr != NULL, "Should detect NULL oop earlier");
+ assert(addr != nullptr, "Should detect null oop earlier");
assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

@@ -838,7 +838,7 @@ STWGCTimer PSParallelCompact::_gc_timer;
ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
- CollectorCounters* PSParallelCompact::_counters = NULL;
+ CollectorCounters* PSParallelCompact::_counters = nullptr;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

@@ -1252,7 +1252,7 @@ PSParallelCompact::first_dead_space_region(const RegionData* beg,
RegionData* const middle_ptr = sd.region(middle);
HeapWord* const dest = middle_ptr->destination();
HeapWord* const addr = sd.region_to_addr(middle);
- assert(dest != NULL, "sanity");
+ assert(dest != nullptr, "sanity");
assert(dest <= addr, "must move left");

if (middle > left && dest < addr) {
@@ -1282,7 +1282,7 @@ PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
RegionData* const middle_ptr = sd.region(middle);
HeapWord* const dest = middle_ptr->destination();
HeapWord* const addr = sd.region_to_addr(middle);
- assert(dest != NULL, "sanity");
+ assert(dest != nullptr, "sanity");
assert(dest <= addr, "must move left");

const size_t dead_to_left = pointer_delta(addr, dest);
@@ -1307,10 +1307,10 @@ PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
{
ParallelCompactData& sd = summary_data();

- assert(cp != NULL, "sanity");
- assert(bottom != NULL, "sanity");
- assert(top != NULL, "sanity");
- assert(new_top != NULL, "sanity");
+ assert(cp != nullptr, "sanity");
+ assert(bottom != nullptr, "sanity");
+ assert(top != nullptr, "sanity");
+ assert(new_top != nullptr, "sanity");
assert(top >= new_top, "summary data problem?");
assert(new_top > bottom, "space is empty; should not be here");
assert(new_top >= cp->destination(), "sanity");
@@ -1423,7 +1423,7 @@ void PSParallelCompact::summarize_spaces_quick()
const MutableSpace* space = _space_info[i].space();
HeapWord** nta = _space_info[i].new_top_addr();
bool result = _summary_data.summarize(_space_info[i].split_info(),
- space->bottom(), space->top(), NULL,
+ space->bottom(), space->top(), nullptr,
space->bottom(), space->end(), nta);
assert(result, "space must fit into itself");
_space_info[i].set_dense_prefix(space->bottom());
@@ -1492,7 +1492,7 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
CollectedHeap::fill_with_object(obj_beg, obj_len);
_mark_bitmap.mark_obj(obj_beg, obj_len);
_summary_data.add_obj(obj_beg, obj_len);
- assert(start_array(id) != NULL, "sanity");
+ assert(start_array(id) != nullptr, "sanity");
start_array(id)->allocate_block(obj_beg);
}
}
@@ -1532,7 +1532,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
// Compute the destination of each Region, and thus each object.
_summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
_summary_data.summarize(_space_info[id].split_info(),
- dense_prefix_end, space->top(), NULL,
+ dense_prefix_end, space->top(), nullptr,
dense_prefix_end, space->end(),
_space_info[id].new_top_addr());
}
@@ -1626,7 +1626,7 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
// All the live data will fit.
bool done = _summary_data.summarize(_space_info[id].split_info(),
space->bottom(), space->top(),
- NULL,
+ nullptr,
*new_top_addr, dst_space_end,
new_top_addr);
assert(done, "space must fit into old gen");
@@ -1635,14 +1635,14 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
_space_info[id].set_new_top(space->bottom());
} else if (live > 0) {
// Attempt to fit part of the source space into the target space.
- HeapWord* next_src_addr = NULL;
+ HeapWord* next_src_addr = nullptr;
bool done = _summary_data.summarize(_space_info[id].split_info(),
space->bottom(), space->top(),
&next_src_addr,
*new_top_addr, dst_space_end,
new_top_addr);
assert(!done, "space should not fit into old gen");
- assert(next_src_addr != NULL, "sanity");
+ assert(next_src_addr != nullptr, "sanity");

// The source space becomes the new target, so the remainder is compacted
// within the space itself.
@@ -1654,7 +1654,7 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
SpaceId(id), next_src_addr, space->top());)
done = _summary_data.summarize(_space_info[id].split_info(),
next_src_addr, space->top(),
- NULL,
+ nullptr,
space->bottom(), dst_space_end,
new_top_addr);
assert(done, "space must fit when compacted into itself");
@@ -1704,7 +1704,7 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
- assert(ref_processor() != NULL, "Sanity");
+ assert(ref_processor() != nullptr, "Sanity");

if (GCLocker::check_active_before_gc()) {
return false;
@@ -1745,7 +1745,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

GCTraceCPUTime tcpu(&_gc_tracer);
- GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
+ GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

heap->pre_full_gc_dump(&_gc_timer);

@@ -1937,7 +1937,7 @@ void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
ParCompactionManager::gc_thread_compaction_manager(worker_id);

do {
- oop obj = NULL;
+ oop obj = nullptr;
ObjArrayTask task;
if (ParCompactionManager::steal_objarray(worker_id, task)) {
cm->follow_array((objArrayOop)task.obj(), task.index());
@@ -2222,7 +2222,7 @@ class TaskQueue : StackObj {
uint _insert_index;
PSParallelCompact::UpdateDensePrefixTask* _backing_array;
public:
- explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(NULL) {
+ explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(nullptr) {
_backing_array = NEW_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _size, mtGC);
}
~TaskQueue() {
@@ -2601,7 +2601,7 @@ void PSParallelCompact::update_deferred_object(ParCompactionManager* cm, HeapWor

const SpaceInfo* const space_info = _space_info + space_id(addr);
ObjectStartArray* const start_array = space_info->start_array();
- if (start_array != NULL) {
+ if (start_array != nullptr) {
start_array->allocate_block(addr);
}

@@ -3095,7 +3095,7 @@ void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *d

ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
- assert(destination() != NULL, "sanity");
+ assert(destination() != nullptr, "sanity");
assert(bitmap()->obj_size(addr) == words, "bad size");

_source = addr;
@@ -3107,7 +3107,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
}

// The start_array must be updated even if the object is not moving.
- if (_start_array != NULL) {
+ if (_start_array != nullptr) {
_start_array->allocate_block(destination());
}

@@ -3118,7 +3118,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {

oop moved_oop = cast_to_oop(copy_destination());
compaction_manager()->update_contents(moved_oop);
- assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
+ assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or null at " PTR_FORMAT, p2i(moved_oop));

update_state(words);
assert(copy_destination() == cast_from_oop<HeapWord*>(moved_oop) + moved_oop->size(), "sanity");
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -134,12 +134,12 @@ public:

// If a word within the partial object will be written to the first word of a
// destination region, this is the address of the destination region;
- // otherwise this is NULL.
+ // otherwise this is null.
HeapWord* dest_region_addr() const { return _dest_region_addr; }

// If a word within the partial object will be written to the first word of a
// destination region, this is the address of that word within the partial
- // object; otherwise this is NULL.
+ // object; otherwise this is null.
HeapWord* first_src_addr() const { return _first_src_addr; }

// Record the data necessary to split the region src_region_idx.
@@ -182,7 +182,7 @@ class SpaceInfo
// Where the dense prefix ends, or the compacted region begins.
HeapWord* dense_prefix() const { return _dense_prefix; }

- // The start array for the (generation containing the) space, or NULL if there
+ // The start array for the (generation containing the) space, or null if there
// is no start array.
ObjectStartArray* start_array() const { return _start_array; }

@@ -566,13 +566,13 @@ inline void ParallelCompactData::RegionData::decrement_destination_count()
inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
DEBUG_ONLY(return _data_location;)
- NOT_DEBUG(return NULL;)
+ NOT_DEBUG(return nullptr;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
DEBUG_ONLY(return _highest_ref;)
- NOT_DEBUG(return NULL;)
+ NOT_DEBUG(return nullptr;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
@@ -841,7 +841,7 @@ ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
#endif
{
_words_remaining = words;
- _source = NULL;
+ _source = nullptr;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,7 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

oop new_obj = cast_to_oop(summary_data().calc_new_pointer(obj, cm));
- assert(new_obj != NULL, "non-null address for live objects");
+ assert(new_obj != nullptr, "non-null address for live objects");
// Is it actually relocated at all?
if (new_obj != obj) {
assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,9 +78,9 @@ void PSPromotionLAB::flush() {
HeapWord* tlab_end = end() + CollectedHeap::min_dummy_object_size();
CollectedHeap::fill_with_object(top(), tlab_end, trueInDebug);

- set_bottom(NULL);
- set_end(NULL);
- set_top(NULL);
+ set_bottom(nullptr);
+ set_end(nullptr);
+ set_top(nullptr);

_state = flushed;
}
@@ -113,7 +113,7 @@ void PSOldPromotionLAB::flush() {

PSPromotionLAB::flush();

- assert(_start_array != NULL, "Sanity");
+ assert(_start_array != nullptr, "Sanity");

_start_array->allocate_block(obj);
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@ class PSPromotionLAB : public CHeapObj<mtGC> {
// The shared initialize code invokes this.
debug_only(virtual bool lab_is_valid(MemRegion lab) { return false; });

- PSPromotionLAB() : _top(NULL), _bottom(NULL), _end(NULL), _state(zero_size) { }
+ PSPromotionLAB() : _top(nullptr), _bottom(nullptr), _end(nullptr), _state(zero_size) { }

public:
// Filling and flushing.
@@ -103,7 +103,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
ObjectStartArray* _start_array;

public:
- PSOldPromotionLAB() : _start_array(NULL) { }
+ PSOldPromotionLAB() : _start_array(nullptr) { }

void set_start_array(ObjectStartArray* start_array) { _start_array = start_array; }

@@ -113,7 +113,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
HeapWord* allocate(size_t size) {
// Cannot test for this now that we're doing promotion failures
// assert(_state != flushed, "Sanity");
- assert(_start_array != NULL, "Sanity");
+ assert(_start_array != nullptr, "Sanity");
HeapWord* obj = top();
if (size <= pointer_delta(end(), obj)) {
HeapWord* new_top = obj + size;
@@ -124,7 +124,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
return obj;
}

- return NULL;
+ return nullptr;
}

debug_only(virtual bool lab_is_valid(MemRegion lab));
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@ HeapWord* PSYoungPromotionLAB::allocate(size_t size) {
assert(is_object_aligned(new_top), "checking alignment");
return obj;
} else {
- return NULL;
+ return nullptr;
}
}

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,11 +43,11 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"

- PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
- PSPromotionManager::PSScannerTasksQueueSet* PSPromotionManager::_stack_array_depth = NULL;
- PreservedMarksSet* PSPromotionManager::_preserved_marks_set = NULL;
- PSOldGen* PSPromotionManager::_old_gen = NULL;
- MutableSpace* PSPromotionManager::_young_space = NULL;
+ PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = nullptr;
+ PSPromotionManager::PSScannerTasksQueueSet* PSPromotionManager::_stack_array_depth = nullptr;
+ PreservedMarksSet* PSPromotionManager::_preserved_marks_set = nullptr;
+ PSOldGen* PSPromotionManager::_old_gen = nullptr;
+ MutableSpace* PSPromotionManager::_young_space = nullptr;

void PSPromotionManager::initialize() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -59,7 +59,7 @@ void PSPromotionManager::initialize() {

// To prevent false sharing, we pad the PSPromotionManagers
// and make sure that the first instance starts at a cache line.
- assert(_manager_array == NULL, "Attempt to initialize twice");
+ assert(_manager_array == nullptr, "Attempt to initialize twice");
_manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);

_stack_array_depth = new PSScannerTasksQueueSet(ParallelGCThreads);
@@ -71,7 +71,7 @@ void PSPromotionManager::initialize() {
// The VMThread gets its own PSPromotionManager, which is not available
// for work stealing.

- assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
+ assert(_preserved_marks_set == nullptr, "Attempt to initialize twice");
_preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
_preserved_marks_set->init(promotion_manager_num);
for (uint i = 0; i < promotion_manager_num; i += 1) {
@@ -90,12 +90,12 @@ bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
assert(index < ParallelGCThreads, "index out of range");
- assert(_manager_array != NULL, "Sanity");
+ assert(_manager_array != nullptr, "Sanity");
return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
- assert(_manager_array != NULL, "Sanity");
+ assert(_manager_array != nullptr, "Sanity");
return &_manager_array[0];
}

@@ -193,7 +193,7 @@ PSPromotionManager::PSPromotionManager() {
// let's choose 1.5x the chunk size
_min_array_size_for_chunking = 3 * _array_chunk_size / 2;

- _preserved_marks = NULL;
+ _preserved_marks = nullptr;

reset();
}
@@ -218,7 +218,7 @@ void PSPromotionManager::reset() {
}

void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_marks) {
- assert(_preserved_marks == NULL, "do not set it twice");
+ assert(_preserved_marks == nullptr, "do not set it twice");
_preserved_marks = preserved_marks;
}

@@ -328,7 +328,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
// this started. If it is the same (i.e., no forwarding
// pointer has been installed), then this thread owns
// it.
- if (obj->forward_to_atomic(obj, obj_mark) == NULL) {
+ if (obj->forward_to_atomic(obj, obj_mark) == nullptr) {
// We won any races, we "own" this object.
assert(obj == obj->forwardee(), "Sanity");

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
#include "utilities/copy.hpp"

inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
- assert(_manager_array != NULL, "access of NULL manager_array");
+ assert(_manager_array != nullptr, "access of null manager_array");
assert(index < ParallelGCThreads, "out of range manager_array access");
return &_manager_array[index];
}
@@ -68,10 +68,10 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
uint age, bool tenured,
const PSPromotionLAB* lab) {
// Skip if memory allocation failed
- if (new_obj != NULL) {
+ if (new_obj != nullptr) {
const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

- if (lab != NULL) {
+ if (lab != nullptr) {
// Promotion of object through newly allocated PLAB
if (gc_tracer->should_report_promotion_in_new_plab_event()) {
size_t obj_bytes = obj_size * HeapWordSize;
@@ -161,7 +161,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
markWord test_mark) {
assert(should_scavenge(&o), "Sanity");

- oop new_obj = NULL;
+ oop new_obj = nullptr;
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();

@@ -173,18 +173,18 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
// Try allocating obj in to-space (unless too old)
if (age < PSScavenge::tenuring_threshold()) {
new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
- if (new_obj == NULL && !_young_gen_is_full) {
+ if (new_obj == nullptr && !_young_gen_is_full) {
// Do we allocate directly, or flush and refill?
if (new_obj_size > (YoungPLABSize / 2)) {
// Allocate this object directly
new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
+ promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr);
} else {
// Flush and fill
_young_lab.flush();

HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
- if (lab_base != NULL) {
+ if (lab_base != nullptr) {
_young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
// Try the young lab allocation again.
new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
@@ -198,7 +198,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
}

// Otherwise try allocating obj tenured
- if (new_obj == NULL) {
+ if (new_obj == nullptr) {
#ifndef PRODUCT
if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
return oop_promotion_failed(o, test_mark);
@@ -208,19 +208,19 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
new_obj_is_tenured = true;

- if (new_obj == NULL) {
+ if (new_obj == nullptr) {
if (!_old_gen_is_full) {
// Do we allocate directly, or flush and refill?
if (new_obj_size > (OldPLABSize / 2)) {
// Allocate this object directly
new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
+ promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr);
} else {
// Flush and fill
_old_lab.flush();

HeapWord* lab_base = old_gen()->allocate(OldPLABSize);
- if(lab_base != NULL) {
+ if(lab_base != nullptr) {
_old_lab.initialize(MemRegion(lab_base, OldPLABSize));
// Try the old lab allocation again.
|
||||
new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
|
||||
@ -235,14 +235,14 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
|
||||
// CAS testing code. Keeping the code here also minimizes
|
||||
// the impact on the common case fast path code.
|
||||
|
||||
if (new_obj == NULL) {
|
||||
if (new_obj == nullptr) {
|
||||
_old_gen_is_full = true;
|
||||
return oop_promotion_failed(o, test_mark);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert(new_obj != NULL, "allocation should have succeeded");
|
||||
assert(new_obj != nullptr, "allocation should have succeeded");
|
||||
|
||||
// Copy obj
|
||||
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
|
||||
@ -254,7 +254,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
|
||||
// Now we have to CAS in the header.
|
||||
// Make copy visible to threads reading the forwardee.
|
||||
oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release);
|
||||
if (forwardee == NULL) { // forwardee is NULL when forwarding is successful
|
||||
if (forwardee == nullptr) { // forwardee is null when forwarding is successful
|
||||
// We won any races, we "own" this object.
|
||||
assert(new_obj == o->forwardee(), "Sanity");
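
Both this hunk and the oop_promotion_failed() hunk in the previous file rely on the convention that forward_to_atomic() returns null exactly when the calling thread installed the forwarding pointer first. A minimal stand-alone sketch of that claim-by-CAS idiom (the Object type and its field are invented for illustration and deliberately ignore HotSpot's markWord encoding):

#include <atomic>

struct Object {
  std::atomic<Object*> forwardee{nullptr};

  // Try to claim this object by publishing 'copy' as its forwardee.
  // Returns nullptr if we won the race, otherwise the winner's copy.
  Object* forward_to_atomic(Object* copy) {
    Object* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, copy,
                                          std::memory_order_release,
                                          std::memory_order_acquire)) {
      return nullptr;          // our CAS installed the forwarding pointer
    }
    return expected;           // another thread forwarded the object first
  }
};

A caller that gets nullptr back keeps using its own copy (the "We won any races" branch above); in the losing case it would typically discard its copy and use the returned forwardee instead.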

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,16 +72,16 @@
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
PSCardTable* PSScavenge::_card_table = NULL;
ReferenceProcessor* PSScavenge::_ref_processor = nullptr;
PSCardTable* PSScavenge::_card_table = nullptr;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
HeapWord* PSScavenge::_young_generation_boundary = nullptr;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = NULL;
CollectorCounters* PSScavenge::_counters = nullptr;

static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
@ -159,7 +159,7 @@ public:
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_to_space = heap->young_gen()->to_space();

assert(_promotion_manager != NULL, "Sanity");
assert(_promotion_manager != nullptr, "Sanity");
}

template <class T> void do_oop_work(T* p) {
@ -300,7 +300,7 @@ public:
_active_workers(active_workers),
_is_old_gen_empty(old_gen->object_space()->is_empty()),
_terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
assert(_old_gen != NULL, "Sanity");
assert(_old_gen != nullptr, "Sanity");
}

virtual void work(uint worker_id) {
@ -406,7 +406,7 @@ bool PSScavenge::invoke_no_policy() {
ResourceMark rm;

GCTraceCPUTime tcpu(&_gc_tracer);
GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ class PSScavenge: AllStatic {
static bool should_attempt_scavenge();

// Private accessors
static PSCardTable* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
static PSCardTable* const card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }

public:
@ -93,7 +93,7 @@ class PSScavenge: AllStatic {
}
// Used by scavenge_contents
static ReferenceProcessor* const reference_processor() {
assert(_ref_processor != NULL, "Sanity");
assert(_ref_processor != nullptr, "Sanity");
return _ref_processor;
}
// The promotion managers tell us if they encountered overflow

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ void VM_ParallelGCFailedAllocation::doit() {
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_mem_allocate(_word_size);

if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
set_gc_locked();
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,10 +40,10 @@ PSVirtualSpace::PSVirtualSpace(ReservedSpace rs, size_t alignment) :
// Deprecated.
PSVirtualSpace::PSVirtualSpace():
_alignment(os::vm_page_size()),
_reserved_low_addr(NULL),
_reserved_high_addr(NULL),
_committed_low_addr(NULL),
_committed_high_addr(NULL),
_reserved_low_addr(nullptr),
_reserved_high_addr(nullptr),
_committed_low_addr(nullptr),
_committed_high_addr(nullptr),
_special(false) {
}

@ -62,8 +62,8 @@ void PSVirtualSpace::release() {
DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
// This may not release memory it didn't reserve.
// Use rs.release() to release the underlying memory instead.
_reserved_low_addr = _reserved_high_addr = NULL;
_committed_low_addr = _committed_high_addr = NULL;
_reserved_low_addr = _reserved_high_addr = nullptr;
_committed_low_addr = _committed_high_addr = nullptr;
_special = false;
}

@ -126,7 +126,7 @@ void PSVirtualSpace::verify() const {

// Reserved region must be non-empty or both addrs must be 0.
assert(reserved_low_addr() < reserved_high_addr() ||
reserved_low_addr() == NULL && reserved_high_addr() == NULL,
reserved_low_addr() == nullptr && reserved_high_addr() == nullptr,
"bad reserved addrs");
assert(committed_low_addr() <= committed_high_addr(), "bad committed addrs");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,16 +37,16 @@

PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, size_t max_size) :
_reserved(),
_virtual_space(NULL),
_eden_space(NULL),
_from_space(NULL),
_to_space(NULL),
_virtual_space(nullptr),
_eden_space(nullptr),
_from_space(nullptr),
_to_space(nullptr),
_min_gen_size(min_size),
_max_gen_size(max_size),
_gen_counters(NULL),
_eden_counters(NULL),
_from_counters(NULL),
_to_counters(NULL)
_gen_counters(nullptr),
_eden_counters(nullptr),
_from_counters(nullptr),
_to_counters(nullptr)
{
initialize(rs, initial_size, GenAlignment);
}
@ -734,7 +734,7 @@ size_t PSYoungGen::available_to_min_gen() {
// from-space.
size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0;
MutableSpace* space_shrinking = NULL;
MutableSpace* space_shrinking = nullptr;
if (from_space()->end() > to_space()->end()) {
space_shrinking = from_space();
} else {
@ -781,7 +781,7 @@ void PSYoungGen::reset_survivors_after_shrink() {
(HeapWord*)virtual_space()->high_boundary());
PSScavenge::set_subject_to_discovery_span(_reserved);

MutableSpace* space_shrinking = NULL;
MutableSpace* space_shrinking = nullptr;
if (from_space()->end() > to_space()->end()) {
space_shrinking = from_space();
} else {
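
Every hunk in this change makes the same mechanical NULL-to-nullptr substitution. As general C++ background (not something stated in the patch itself), the usual argument for nullptr is that it has its own type, std::nullptr_t, which converts to pointer types but not to integers, so it cannot be picked up by an integer overload the way the NULL macro can; a small self-contained example:

#include <iostream>

void f(int)   { std::cout << "f(int)\n"; }
void f(char*) { std::cout << "f(char*)\n"; }

int main() {
  f(0);         // resolves to f(int)
  f(nullptr);   // resolves to f(char*); nullptr never converts to int
  // f(NULL);   // ambiguous or f(int), depending on how the platform defines NULL
  return 0;
}

In pointer comparisons and assignments like the ones above, either spelling compiles to the same behavior; nullptr simply makes the pointer intent explicit.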