8301223: Replace NULL with nullptr in share/gc/g1/
Reviewed-by: tschatzl, kbarrett
parent 544bd260b6
commit 75a4edca6b
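
The change is mechanical: uses of the NULL macro in pointer contexts are replaced with the C++11 keyword nullptr, which is type-safe and cannot silently convert to an integer. A minimal, hypothetical sketch of the pattern (the Region type and last_region function below are illustrative only, not code from this patch):

    #include <cassert>

    struct Region { Region* next = nullptr; };

    // Walk to the end of a singly linked list of regions.
    static Region* last_region(Region* r) {
      assert(r != nullptr && "pre-condition");  // was: r != NULL
      while (r->next != nullptr) {              // was: r->next != NULL
        r = r->next;
      }
      return r;                                 // never nullptr here
    }
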
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -106,7 +106,7 @@ void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
 assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
 assert(pre_val->is_register(), "must be");
 assert(pre_val->type() == T_OBJECT, "must be an object");
-assert(info == NULL, "sanity");
+assert(info == nullptr, "sanity");

 slow = new G1PreBarrierStub(pre_val);
 }
@@ -123,9 +123,9 @@ void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_v
 return;
 }

-// If the "new_val" is a constant NULL, no barrier is necessary.
+// If the "new_val" is a constant null, no barrier is necessary.
 if (new_val->is_constant() &&
-new_val->as_constant_ptr()->as_jobject() == NULL) return;
+new_val->as_constant_ptr()->as_jobject() == nullptr) return;

 if (!new_val->is_register()) {
 LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
@@ -204,7 +204,7 @@ class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 virtual OopMapSet* generate_code(StubAssembler* sasm) {
 G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 bs->generate_c1_pre_barrier_runtime_stub(sasm);
-return NULL;
+return nullptr;
 }
 };

@@ -212,7 +212,7 @@ class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 virtual OopMapSet* generate_code(StubAssembler* sasm) {
 G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
 bs->generate_c1_post_barrier_runtime_stub(sasm);
-return NULL;
+return nullptr;
 }
 };

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ class G1PreBarrierStub: public CodeStub {
 // previous value is assumed to have already been loaded into pre_val.
 G1PreBarrierStub(LIR_Opr pre_val) :
 _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
-_patch_code(lir_patch_none), _info(NULL)
+_patch_code(lir_patch_none), _info(nullptr)
 {
 assert(_pre_val->is_register(), "should be a register");
 FrameMap* f = Compilation::current()->frame_map();
@@ -74,7 +74,7 @@ class G1PreBarrierStub: public CodeStub {
 if (_do_load) {
 // don't pass in the code emit info since it's processed in the fast
 // path
-if (_info != NULL)
+if (_info != nullptr)
 visitor->do_slow_case(_info);
 else
 visitor->do_slow_case();
@@ -134,8 +134,8 @@ class G1BarrierSetC1 : public ModRefBarrierSetC1 {

 public:
 G1BarrierSetC1()
-: _pre_barrier_c1_runtime_code_blob(NULL),
-_post_barrier_c1_runtime_code_blob(NULL) {}
+: _pre_barrier_c1_runtime_code_blob(nullptr),
+_post_barrier_c1_runtime_code_blob(nullptr) {}

 CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }
 CodeBlob* post_barrier_c1_runtime_code_blob() { return _post_barrier_c1_runtime_code_blob; }

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -73,12 +73,12 @@ const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
 * marking are kept alive, all reference updates need to any previous
 * reference stored before writing.
 *
-* If the previous value is NULL there is no need to save the old value.
-* References that are NULL are filtered during runtime by the barrier
+* If the previous value is null there is no need to save the old value.
+* References that are null are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However in the case of newly allocated objects it might be possible to
-* prove that the reference about to be overwritten is NULL during compile
+* prove that the reference about to be overwritten is null during compile
 * time and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
@@ -100,7 +100,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
 return false; // cannot unalias unless there are precise offsets
 }

-if (alloc == NULL) {
+if (alloc == nullptr) {
 return false; // No allocation found
 }

@@ -116,7 +116,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
 intptr_t st_offset = 0;
 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

-if (st_base == NULL) {
+if (st_base == nullptr) {
 break; // inscrutable pointer
 }

@@ -156,12 +156,12 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
 // Make sure that we are looking at the same allocation site.
 // The alloc variable is guaranteed to not be null here from earlier check.
 if (alloc == st_alloc) {
-// Check that the initialization is storing NULL so that no previous store
+// Check that the initialization is storing null so that no previous store
 // has been moved up and directly write a reference
 Node* captured_store = st_init->find_captured_store(offset,
 type2aelembytes(T_OBJECT),
 phase);
-if (captured_store == NULL || captured_store == st_init->zero_memory()) {
+if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
 return true;
 }
 }
@@ -191,10 +191,10 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,

 if (do_load) {
 // We need to generate the load of the previous value
-assert(obj != NULL, "must have a base");
-assert(adr != NULL, "where are loading from?");
-assert(pre_val == NULL, "loaded already?");
-assert(val_type != NULL, "need a type");
+assert(obj != nullptr, "must have a base");
+assert(adr != nullptr, "where are loading from?");
+assert(pre_val == nullptr, "loaded already?");
+assert(val_type != nullptr, "need a type");

 if (use_ReduceInitialCardMarks()
 && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
@@ -203,7 +203,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,

 } else {
 // In this case both val_type and alias_idx are unused.
-assert(pre_val != NULL, "must be loaded already");
+assert(pre_val != nullptr, "must be loaded already");
 // Nothing to be done if pre_val is null.
 if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
 assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
@@ -248,7 +248,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,
 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx, false, MemNode::unordered, LoadNode::Pinned);
 }

-// if (pre_val != NULL)
+// if (pre_val != nullptr)
 __ if_then(pre_val, BoolTest::ne, kit->null()); {
 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

@@ -270,7 +270,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,
 const TypeFunc *tf = write_ref_field_pre_entry_Type();
 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
 } __ end_if(); // (!index)
-} __ end_if(); // (pre_val != NULL)
+} __ end_if(); // (pre_val != nullptr)
 } __ end_if(); // (!marking)

 // Final sync IdealKit and GraphKit.
@@ -288,7 +288,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,
 *
 * To reduce the number of updates to the remembered set the post-barrier
 * filters updates to fields in objects located in the Young Generation,
-* the same region as the reference, when the NULL is being written or
+* the same region as the reference, when the null is being written or
 * if the card is already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
@@ -313,7 +313,7 @@ bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
 return false; // cannot unalias unless there are precise offsets
 }

-if (alloc == NULL) {
+if (alloc == nullptr) {
 return false; // No allocation found
 }

@@ -377,13 +377,13 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,
 Node* val,
 BasicType bt,
 bool use_precise) const {
-// If we are writing a NULL then we need no post barrier
+// If we are writing a null then we need no post barrier

-if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
-// Must be NULL
+if (val != nullptr && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
+// Must be null
 const Type* t = val->bottom_type();
-assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
-// No post barrier if writing NULLx
+assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be null");
+// No post barrier if writing null
 return;
 }

@@ -406,7 +406,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,
 adr = obj;
 }
 // (Else it's an array (or unknown), and we want more precise card marks.)
-assert(adr != NULL, "");
+assert(adr != nullptr, "");

 IdealKit ideal(kit, true);

@@ -448,7 +448,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,

 // If we know the value being stored does it cross regions?

-if (val != NULL) {
+if (val != nullptr) {
 // Does the store cause us to cross regions?

 // Should be able to do an unsigned compare of region_size instead of
@@ -459,7 +459,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,
 // if (xor_res == 0) same region so skip
 __ if_then(xor_res, BoolTest::ne, zeroX, likely); {

-// No barrier if we are storing a NULL
+// No barrier if we are storing a null.
 __ if_then(val, BoolTest::ne, kit->null(), likely); {

 // Ok must mark the card if not already dirty
@@ -509,7 +509,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off

 // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
 const TypeX* otype = offset->find_intptr_t_type();
-if (otype != NULL && otype->is_con() &&
+if (otype != nullptr && otype->is_con() &&
 otype->get_con() != java_lang_ref_Reference::referent_offset()) {
 // Constant offset but not the reference_offset so just return
 return;
@@ -517,14 +517,14 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off

 // We only need to generate the runtime guards for instances.
 const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
-if (btype != NULL) {
+if (btype != nullptr) {
 if (btype->isa_aryptr()) {
 // Array type so nothing to do
 return;
 }

 const TypeInstPtr* itype = btype->isa_instptr();
-if (itype != NULL) {
+if (itype != nullptr) {
 // Can the klass of base_oop be statically determined to be
 // _not_ a sub-class of Reference and _not_ Object?
 ciKlass* klass = itype->instance_klass();
@@ -563,7 +563,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off
 __ sync_kit(kit);

 Node* one = __ ConI(1);
-// is_instof == 0 if base_oop == NULL
+// is_instof == 0 if base_oop == nullptr
 __ if_then(is_instof, BoolTest::eq, one, unlikely); {

 // Update graphKit from IdeakKit.
@@ -572,7 +572,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off
 // Use the pre-barrier to record the value in the referent field
 pre_barrier(kit, false /* do_load */,
 __ ctrl(),
-NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
 pre_val /* pre_val */,
 T_OBJECT);
 if (need_mem_bar) {
@@ -647,7 +647,7 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
 // Use the pre-barrier to record the value in the referent field
 pre_barrier(kit, false /* do_load */,
 kit->control(),
-NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
 load /* pre_val */, T_OBJECT);
 // Add memory barrier to prevent commoning reads from this field
 // across safepoint since GC can change its value.
@@ -669,7 +669,7 @@ bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
 return false;
 }
 CallLeafNode *call = node->as_CallLeaf();
-if (call->_name == NULL) {
+if (call->_name == nullptr) {
 return false;
 }

@@ -720,14 +720,14 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c
 // but the new allocation is passed to arraycopy stub and it could not
 // be scalar replaced. So we don't check the case.

-// An other case of only one user (Xor) is when the value check for NULL
+// An other case of only one user (Xor) is when the value check for null
 // in G1 post barrier is folded after CCP so the code which used URShift
 // is removed.

 // Take Region node before eliminating post barrier since it also
 // eliminates CastP2X node when it has only one user.
 Node* this_region = node->in(0);
-assert(this_region != NULL, "");
+assert(this_region != nullptr, "");

 // Remove G1 post barrier.

@@ -735,7 +735,7 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c
 // checks if the store done to a different from the value's region.
 // And replace Cmp with #0 (false) to collapse G1 post barrier.
 Node* xorx = node->find_out_with(Op_XorX);
-if (xorx != NULL) {
+if (xorx != nullptr) {
 Node* shift = xorx->unique_out();
 Node* cmpx = shift->unique_out();
 assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
@@ -746,7 +746,7 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c
 // Remove G1 pre barrier.

 // Search "if (marking != 0)" check and set it to "false".
-// There is no G1 pre barrier if previous stored value is NULL
+// There is no G1 pre barrier if previous stored value is null
 // (for example, after initialization).
 if (this_region->is_Region() && this_region->req() == 3) {
 int ind = 1;
@@ -777,10 +777,10 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c
 // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
 // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
 Node* shift = node->find_out_with(Op_URShiftX);
-assert(shift != NULL, "missing G1 post barrier");
+assert(shift != nullptr, "missing G1 post barrier");
 Node* addp = shift->unique_out();
 Node* load = addp->find_out_with(Op_LoadB);
-assert(load != NULL, "missing G1 post barrier");
+assert(load != nullptr, "missing G1 post barrier");
 Node* cmpx = load->unique_out();
 assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
 cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
@@ -797,27 +797,27 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c

 Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
 if (!use_ReduceInitialCardMarks() &&
-c != NULL && c->is_Region() && c->req() == 3) {
+c != nullptr && c->is_Region() && c->req() == 3) {
 for (uint i = 1; i < c->req(); i++) {
-if (c->in(i) != NULL && c->in(i)->is_Region() &&
+if (c->in(i) != nullptr && c->in(i)->is_Region() &&
 c->in(i)->req() == 3) {
 Node* r = c->in(i);
 for (uint j = 1; j < r->req(); j++) {
-if (r->in(j) != NULL && r->in(j)->is_Proj() &&
-r->in(j)->in(0) != NULL &&
+if (r->in(j) != nullptr && r->in(j)->is_Proj() &&
+r->in(j)->in(0) != nullptr &&
 r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
 r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
 Node* call = r->in(j)->in(0);
 c = c->in(i == 1 ? 2 : 1);
-if (c != NULL && c->Opcode() != Op_Parm) {
+if (c != nullptr && c->Opcode() != Op_Parm) {
 c = c->in(0);
-if (c != NULL) {
+if (c != nullptr) {
 c = c->in(0);
-assert(call->in(0) == NULL ||
-call->in(0)->in(0) == NULL ||
-call->in(0)->in(0)->in(0) == NULL ||
-call->in(0)->in(0)->in(0)->in(0) == NULL ||
-call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
+assert(call->in(0) == nullptr ||
+call->in(0)->in(0) == nullptr ||
+call->in(0)->in(0)->in(0) == nullptr ||
+call->in(0)->in(0)->in(0)->in(0) == nullptr ||
+call->in(0)->in(0)->in(0)->in(0)->in(0) == nullptr ||
 c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
 return c;
 }
@@ -864,7 +864,7 @@ bool G1BarrierSetC2::has_cas_in_use_chain(Node *n) const {
 void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /*output*/) const {
 assert(loads.size() == 0, "Loads list should be empty");
 Node* pre_val_if = marking_if->find_out_with(Op_IfTrue)->find_out_with(Op_If);
-if (pre_val_if != NULL) {
+if (pre_val_if != nullptr) {
 Unique_Node_List visited;
 Node_List worklist;
 Node* pre_val = pre_val_if->in(1)->in(1)->in(1);
@@ -891,7 +891,7 @@ void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /
 continue;
 }
 if (x->is_Load() || x->is_LoadStore()) {
-assert(x->in(0) != NULL, "Pre-val load has to have a control");
+assert(x->in(0) != nullptr, "Pre-val load has to have a control");
 loads.push(x);
 continue;
 }
@@ -935,7 +935,7 @@ void G1BarrierSetC2::verify_no_safepoints(Compile* compile, Node* marking_check_
 worklist.push(marking_check_if);
 while (worklist.size() > 0 && found < controls.size()) {
 Node* x = worklist.pop();
-if (x == NULL || x == compile->top()) continue;
+if (x == nullptr || x == compile->top()) continue;
 if (visited.member(x)) {
 continue;
 } else {
@@ -973,7 +973,7 @@ void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) co
 worklist.push(compile->root());
 while (worklist.size() > 0) {
 Node* x = worklist.pop();
-if (x == NULL || x == compile->top()) continue;
+if (x == nullptr || x == compile->top()) continue;
 if (visited.member(x)) {
 continue;
 } else {
@@ -1013,7 +1013,7 @@ void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) co
 if_ctrl = if_ctrl->in(0)->in(0);
 }
 }
-assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
+assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");

 Unique_Node_List loads;
 verify_pre_load(iff, loads);

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -33,25 +33,25 @@
 #include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"

-G1CollectedHeap* G1AllocRegion::_g1h = NULL;
-HeapRegion* G1AllocRegion::_dummy_region = NULL;
+G1CollectedHeap* G1AllocRegion::_g1h = nullptr;
+HeapRegion* G1AllocRegion::_dummy_region = nullptr;

 void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
-assert(_dummy_region == NULL, "should be set once");
-assert(dummy_region != NULL, "pre-condition");
+assert(_dummy_region == nullptr, "should be set once");
+assert(dummy_region != nullptr, "pre-condition");
 assert(dummy_region->free() == 0, "pre-condition");

 // Make sure that any allocation attempt on this region will fail
 // and will not trigger any asserts.
 DEBUG_ONLY(size_t assert_tmp);
-assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail");
+assert(dummy_region->par_allocate(1, 1, &assert_tmp) == nullptr, "should fail");

 _g1h = g1h;
 _dummy_region = dummy_region;
 }

 size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
-assert(alloc_region != NULL && alloc_region != _dummy_region,
+assert(alloc_region != nullptr && alloc_region != _dummy_region,
 "pre-condition");
 size_t result = 0;

@@ -73,7 +73,7 @@ size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {

 while (free_word_size >= min_word_size_to_fill) {
 HeapWord* dummy = par_allocate(alloc_region, free_word_size);
-if (dummy != NULL) {
+if (dummy != nullptr) {
 // If the allocation was successful we should fill in the space. If the
 // allocation was in old any necessary BOT updates will be done.
 alloc_region->fill_with_dummy_object(dummy, free_word_size);
@@ -115,7 +115,7 @@ size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
 }

 size_t G1AllocRegion::retire(bool fill_up) {
-assert_alloc_region(_alloc_region != NULL, "not initialized properly");
+assert_alloc_region(_alloc_region != nullptr, "not initialized properly");

 size_t waste = 0;

@@ -137,12 +137,12 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,

 trace("attempting region allocation");
 HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
-if (new_alloc_region != NULL) {
+if (new_alloc_region != nullptr) {
 new_alloc_region->reset_pre_dummy_top();
 // Need to do this before the allocation
 _used_bytes_before = new_alloc_region->used();
 HeapWord* result = allocate(new_alloc_region, word_size);
-assert_alloc_region(result != NULL, "the allocation should succeeded");
+assert_alloc_region(result != nullptr, "the allocation should succeeded");

 OrderAccess::storestore();
 // Note that we first perform the allocation and then we store the
@@ -153,15 +153,15 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
 return result;
 } else {
 trace("region allocation failed");
-return NULL;
+return nullptr;
 }
 ShouldNotReachHere();
 }

 void G1AllocRegion::init() {
 trace("initializing");
-assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
-assert_alloc_region(_dummy_region != NULL, "should have been set");
+assert_alloc_region(_alloc_region == nullptr && _used_bytes_before == 0, "pre-condition");
+assert_alloc_region(_dummy_region != nullptr, "should have been set");
 _alloc_region = _dummy_region;
 _count = 0;
 trace("initialized");
@@ -171,7 +171,7 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
 trace("setting");
 // We explicitly check that the region is not empty to make sure we
 // maintain the "the alloc region cannot be empty" invariant.
-assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
+assert_alloc_region(alloc_region != nullptr && !alloc_region->is_empty(), "pre-condition");
 assert_alloc_region(_alloc_region == _dummy_region &&
 _used_bytes_before == 0 && _count == 0,
 "pre-condition");
@@ -186,7 +186,7 @@ void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
 trace("update");
 // We explicitly check that the region is not empty to make sure we
 // maintain the "the alloc region cannot be empty" invariant.
-assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
+assert_alloc_region(alloc_region != nullptr && !alloc_region->is_empty(), "pre-condition");

 _alloc_region = alloc_region;
 _count += 1;
@@ -198,9 +198,9 @@ HeapRegion* G1AllocRegion::release() {
 HeapRegion* alloc_region = _alloc_region;
 retire(false /* fill_up */);
 assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
-_alloc_region = NULL;
+_alloc_region = nullptr;
 trace("released");
-return (alloc_region == _dummy_region) ? NULL : alloc_region;
+return (alloc_region == _dummy_region) ? nullptr : alloc_region;
 }

 #ifndef PRODUCT
@@ -217,7 +217,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_

 bool detailed_info = log.is_trace();

-if ((actual_word_size == 0 && result == NULL) || detailed_info) {
+if ((actual_word_size == 0 && result == nullptr) || detailed_info) {
 ResourceMark rm;
 LogStream ls_trace(log.trace());
 LogStream ls_debug(log.debug());
@@ -225,8 +225,8 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_

 out->print("%s: %u ", _name, _count);

-if (_alloc_region == NULL) {
-out->print("NULL");
+if (_alloc_region == nullptr) {
+out->print("null");
 } else if (_alloc_region == _dummy_region) {
 out->print("DUMMY");
 } else {
@@ -236,7 +236,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_
 out->print(" : %s", str);

 if (detailed_info) {
-if (result != NULL) {
+if (result != nullptr) {
 out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
 min_word_size, desired_word_size, actual_word_size, p2i(result));
 } else if (min_word_size != 0) {
@@ -251,7 +251,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_
 G1AllocRegion::G1AllocRegion(const char* name,
 bool bot_updates,
 uint node_index)
-: _alloc_region(NULL),
+: _alloc_region(nullptr),
 _count(0),
 _used_bytes_before(0),
 _name(name),
@@ -269,7 +269,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
 }

 void MutatorAllocRegion::init() {
-assert(_retained_alloc_region == NULL, "Pre-condition");
+assert(_retained_alloc_region == nullptr, "Pre-condition");
 G1AllocRegion::init();
 _wasted_bytes = 0;
 }
@@ -280,7 +280,7 @@ bool MutatorAllocRegion::should_retain(HeapRegion* region) {
 return false;
 }

-if (_retained_alloc_region != NULL &&
+if (_retained_alloc_region != nullptr &&
 free_bytes < _retained_alloc_region->free()) {
 return false;
 }
@@ -292,12 +292,12 @@ size_t MutatorAllocRegion::retire(bool fill_up) {
 size_t waste = 0;
 trace("retiring");
 HeapRegion* current_region = get();
-if (current_region != NULL) {
+if (current_region != nullptr) {
 // Retain the current region if it fits a TLAB and has more
 // free than the currently retained region.
 if (should_retain(current_region)) {
 trace("mutator retained");
-if (_retained_alloc_region != NULL) {
+if (_retained_alloc_region != nullptr) {
 waste = retire_internal(_retained_alloc_region, true);
 }
 _retained_alloc_region = current_region;
@@ -315,12 +315,12 @@ size_t MutatorAllocRegion::retire(bool fill_up) {
 size_t MutatorAllocRegion::used_in_alloc_regions() {
 size_t used = 0;
 HeapRegion* hr = get();
-if (hr != NULL) {
+if (hr != nullptr) {
 used += hr->used();
 }

 hr = _retained_alloc_region;
-if (hr != NULL) {
+if (hr != nullptr) {
 used += hr->used();
 }
 return used;
@@ -332,9 +332,9 @@ HeapRegion* MutatorAllocRegion::release() {
 // The retained alloc region must be retired and this must be
 // done after the above call to release the mutator alloc region,
 // since it might update the _retained_alloc_region member.
-if (_retained_alloc_region != NULL) {
+if (_retained_alloc_region != nullptr) {
 _wasted_bytes += retire_internal(_retained_alloc_region, false);
-_retained_alloc_region = NULL;
+_retained_alloc_region = nullptr;
 }
 log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)",
 count(),
@@ -359,7 +359,7 @@ size_t G1GCAllocRegion::retire(bool fill_up) {
 HeapRegion* retired = get();
 size_t end_waste = G1AllocRegion::retire(fill_up);
 // Do not count retirement of the dummy allocation region.
-if (retired != NULL) {
+if (retired != nullptr) {
 _stats->add_region_end_waste(end_waste / HeapWordSize);
 }
 return end_waste;

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -46,10 +46,10 @@ private:
 // of. The invariant is that if this object is initialized (i.e.,
 // init() has been called and release() has not) then _alloc_region
 // is either an active allocating region or the dummy region (i.e.,
-// it can never be NULL) and this object can be used to satisfy
+// it can never be null) and this object can be used to satisfy
 // allocation requests. If this object is not initialized
 // (i.e. init() has not been called or release() has been called)
-// then _alloc_region is NULL and this object should not be used to
+// then _alloc_region is null and this object should not be used to
 // satisfy allocation requests (it was done this way to force the
 // correct use of init() and release()).
 HeapRegion* volatile _alloc_region;
@@ -75,7 +75,7 @@ private:
 // purpose and it is not part of the heap) that is full (i.e., top()
 // == end()). When we don't have a valid active region we make
 // _alloc_region point to this. This allows us to skip checking
-// whether the _alloc_region is NULL or not.
+// whether the _alloc_region is null or not.
 static HeapRegion* _dummy_region;

 // After a region is allocated by alloc_new_region, this
@@ -144,7 +144,7 @@ public:
 HeapRegion* get() const {
 HeapRegion * hr = _alloc_region;
 // Make sure that the dummy region does not escape this class.
-return (hr == _dummy_region) ? NULL : hr;
+return (hr == _dummy_region) ? nullptr : hr;
 }

 uint count() { return _count; }
@@ -153,14 +153,14 @@ public:

 // First-level allocation: Should be called without holding a
 // lock. It will try to allocate lock-free out of the active region,
-// or return NULL if it was unable to.
+// or return null if it was unable to.
 inline HeapWord* attempt_allocation(size_t word_size);
 // Perform an allocation out of the current allocation region, with the given
 // minimum and desired size. Returns the actual size allocated (between
 // minimum and desired size) in actual_word_size if the allocation has been
 // successful.
 // Should be called without holding a lock. It will try to allocate lock-free
-// out of the active region, or return NULL if it was unable to.
+// out of the active region, or return null if it was unable to.
 inline HeapWord* attempt_allocation(size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size);
@@ -199,7 +199,7 @@ public:
 size_t min_word_size = 0,
 size_t desired_word_size = 0,
 size_t actual_word_size = 0,
-HeapWord* result = NULL) PRODUCT_RETURN;
+HeapWord* result = nullptr) PRODUCT_RETURN;
 };

 class MutatorAllocRegion : public G1AllocRegion {
@@ -224,7 +224,7 @@ public:
 MutatorAllocRegion(uint node_index)
 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */, node_index),
 _wasted_bytes(0),
-_retained_alloc_region(NULL) { }
+_retained_alloc_region(nullptr) { }

 // Returns the combined used memory in the current alloc region and
 // the retained alloc region.
@@ -235,13 +235,13 @@ public:
 // minimum and desired size) in actual_word_size if the allocation has been
 // successful.
 // Should be called without holding a lock. It will try to allocate lock-free
-// out of the retained region, or return NULL if it was unable to.
+// out of the retained region, or return null if it was unable to.
 inline HeapWord* attempt_retained_allocation(size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size);

 // This specialization of release() makes sure that the retained alloc
-// region is retired and set to NULL.
+// region is retired and set to null.
 virtual HeapRegion* release();

 virtual void init();
@@ -261,7 +261,7 @@ protected:
 G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats,
 G1HeapRegionAttr::region_type_t purpose, uint node_index = G1NUMA::AnyNodeIndex)
 : G1AllocRegion(name, bot_updates, node_index), _stats(stats), _purpose(purpose) {
-assert(stats != NULL, "Must pass non-NULL PLAB statistics");
+assert(stats != nullptr, "Must pass non-null PLAB statistics");
 }
 };

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@ inline void G1AllocRegion::reset_alloc_region() {

 inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
 size_t word_size) {
-assert(alloc_region != NULL, "pre-condition");
+assert(alloc_region != nullptr, "pre-condition");

 return alloc_region->allocate(word_size);
 }
@@ -57,7 +57,7 @@ inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
 size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size) {
-assert(alloc_region != NULL, "pre-condition");
+assert(alloc_region != nullptr, "pre-condition");
 assert(!alloc_region->is_empty(), "pre-condition");

 return alloc_region->par_allocate(min_word_size, desired_word_size, actual_word_size);
@@ -72,15 +72,15 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size) {
 HeapRegion* alloc_region = _alloc_region;
-assert_alloc_region(alloc_region != NULL, "not initialized properly");
+assert_alloc_region(alloc_region != nullptr, "not initialized properly");

 HeapWord* result = par_allocate(alloc_region, min_word_size, desired_word_size, actual_word_size);
-if (result != NULL) {
+if (result != nullptr) {
 trace("alloc", min_word_size, desired_word_size, *actual_word_size, result);
 return result;
 }
 trace("alloc failed", min_word_size, desired_word_size);
-return NULL;
+return nullptr;
 }

 inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size) {
@@ -92,7 +92,7 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size) {
 HeapWord* result = attempt_allocation(min_word_size, desired_word_size, actual_word_size);
-if (result != NULL) {
+if (result != nullptr) {
 return result;
 }

@@ -104,39 +104,39 @@ inline HeapWord* G1AllocRegion::attempt_allocation_using_new_region(size_t min_w
 size_t* actual_word_size) {
 retire(true /* fill_up */);
 HeapWord* result = new_alloc_region_and_allocate(desired_word_size, false /* force */);
-if (result != NULL) {
+if (result != nullptr) {
 *actual_word_size = desired_word_size;
 trace("alloc locked (second attempt)", min_word_size, desired_word_size, *actual_word_size, result);
 return result;
 }
 trace("alloc locked failed", min_word_size, desired_word_size);
-return NULL;
+return nullptr;
 }

 inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size) {
-assert_alloc_region(_alloc_region != NULL, "not initialized properly");
+assert_alloc_region(_alloc_region != nullptr, "not initialized properly");

 trace("forcing alloc", word_size, word_size);
 HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
-if (result != NULL) {
+if (result != nullptr) {
 trace("alloc forced", word_size, word_size, word_size, result);
 return result;
 }
 trace("alloc forced failed", word_size, word_size);
-return NULL;
+return nullptr;
 }

 inline HeapWord* MutatorAllocRegion::attempt_retained_allocation(size_t min_word_size,
 size_t desired_word_size,
 size_t* actual_word_size) {
-if (_retained_alloc_region != NULL) {
+if (_retained_alloc_region != nullptr) {
 HeapWord* result = par_allocate(_retained_alloc_region, min_word_size, desired_word_size, actual_word_size);
-if (result != NULL) {
+if (result != nullptr) {
 trace("alloc retained", min_word_size, desired_word_size, *actual_word_size, result);
 return result;
 }
 }
-return NULL;
+return nullptr;
 }

 #endif // SHARE_GC_G1_G1ALLOCREGION_INLINE_HPP

@@ -43,10 +43,10 @@ G1Allocator::G1Allocator(G1CollectedHeap* heap) :
 _survivor_is_full(false),
 _old_is_full(false),
 _num_alloc_regions(_numa->num_active_nodes()),
-_mutator_alloc_regions(NULL),
-_survivor_gc_alloc_regions(NULL),
+_mutator_alloc_regions(nullptr),
+_survivor_gc_alloc_regions(nullptr),
 _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
-_retained_old_gc_alloc_region(NULL) {
+_retained_old_gc_alloc_region(nullptr) {

 _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
 _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC);
@@ -70,13 +70,13 @@ G1Allocator::~G1Allocator() {
 #ifdef ASSERT
 bool G1Allocator::has_mutator_alloc_region() {
 uint node_index = current_node_index();
-return mutator_alloc_region(node_index)->get() != NULL;
+return mutator_alloc_region(node_index)->get() != nullptr;
 }
 #endif

 void G1Allocator::init_mutator_alloc_regions() {
 for (uint i = 0; i < _num_alloc_regions; i++) {
-assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
+assert(mutator_alloc_region(i)->get() == nullptr, "pre-condition");
 mutator_alloc_region(i)->init();
 }
 }
@@ -84,7 +84,7 @@ void G1Allocator::init_mutator_alloc_regions() {
 void G1Allocator::release_mutator_alloc_regions() {
 for (uint i = 0; i < _num_alloc_regions; i++) {
 mutator_alloc_region(i)->release();
-assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
+assert(mutator_alloc_region(i)->get() == nullptr, "post-condition");
 }
 }

@@ -96,7 +96,7 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
 OldGCAllocRegion* old,
 HeapRegion** retained_old) {
 HeapRegion* retained_region = *retained_old;
-*retained_old = NULL;
+*retained_old = nullptr;

 // We will discard the current GC alloc region if:
 // a) it's in the collection set (it can happen!),
@@ -107,7 +107,7 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
 // during a cleanup and was added to the free list, but
 // has been subsequently used to allocate a humongous
 // object that may be less than the region size).
-if (retained_region != NULL &&
+if (retained_region != nullptr &&
 !retained_region->in_collection_set() &&
 !(retained_region->top() == retained_region->end()) &&
 !retained_region->is_empty() &&
@@ -150,7 +150,7 @@ void G1Allocator::release_gc_alloc_regions(G1EvacInfo* evacuation_info) {

 // If we have an old GC alloc region to release, we'll save it in
 // _retained_old_gc_alloc_region. If we don't
-// _retained_old_gc_alloc_region will become NULL. This is what we
+// _retained_old_gc_alloc_region will become null. This is what we
 // want either way so no reason to check explicitly for either
 // condition.
 _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
@@ -158,10 +158,10 @@ void G1Allocator::release_gc_alloc_regions(G1EvacInfo* evacuation_info) {

 void G1Allocator::abandon_gc_alloc_regions() {
 for (uint i = 0; i < _num_alloc_regions; i++) {
-assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition");
+assert(survivor_gc_alloc_region(i)->get() == nullptr, "pre-condition");
 }
-assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
-_retained_old_gc_alloc_region = NULL;
+assert(old_gc_alloc_region()->get() == nullptr, "pre-condition");
+_retained_old_gc_alloc_region = nullptr;
 }

 bool G1Allocator::survivor_is_full() const {
@@ -191,7 +191,7 @@ size_t G1Allocator::unsafe_max_tlab_alloc() {
 uint node_index = current_node_index();
 HeapRegion* hr = mutator_alloc_region(node_index)->get();
 size_t max_tlab = _g1h->max_tlab_size() * wordSize;
-if (hr == NULL) {
+if (hr == nullptr) {
 return max_tlab;
 } else {
 return clamp(hr->free(), MinTLABSize, max_tlab);
@@ -199,7 +199,7 @@ size_t G1Allocator::unsafe_max_tlab_alloc() {
 }

 size_t G1Allocator::used_in_alloc_regions() {
-assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
+assert(Heap_lock->owner() != nullptr, "Should be owned on this thread's behalf.");
 size_t used = 0;
 for (uint i = 0; i < _num_alloc_regions; i++) {
 used += mutator_alloc_region(i)->used_in_alloc_regions();
@@ -213,7 +213,7 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
 uint node_index) {
 size_t temp = 0;
 HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
-assert(result == NULL || temp == word_size,
+assert(result == nullptr || temp == word_size,
 "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
 word_size, temp, p2i(result));
 return result;
@@ -231,7 +231,7 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
 return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 default:
 ShouldNotReachHere();
-return NULL; // Keep some compilers happy
+return nullptr; // Keep some compilers happy
 }
 }

@@ -245,7 +245,7 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
 HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size,
 desired_word_size,
 actual_word_size);
-if (result == NULL && !survivor_is_full()) {
+if (result == nullptr && !survivor_is_full()) {
 MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 // Multiple threads may have queued at the FreeList_lock above after checking whether there
 // actually is still memory available. Redo the check under the lock to avoid unnecessary work;
@@ -254,12 +254,12 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
 result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
 desired_word_size,
 actual_word_size);
-if (result == NULL) {
+if (result == nullptr) {
 set_survivor_full();
 }
 }
 }
-if (result != NULL) {
+if (result != nullptr) {
 _g1h->dirty_young_block(result, *actual_word_size);
 }
 return result;
@@ -274,7 +274,7 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
 HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
 desired_word_size,
 actual_word_size);
-if (result == NULL && !old_is_full()) {
+if (result == nullptr && !old_is_full()) {
 MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 // Multiple threads may have queued at the FreeList_lock above after checking whether there
 // actually is still memory available. Redo the check under the lock to avoid unnecessary work;
@@ -283,7 +283,7 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
 result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
 desired_word_size,
 actual_word_size);
-if (result == NULL) {
+if (result == nullptr) {
 set_old_full();
 }
 }
@@ -398,15 +398,15 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
 &actual_plab_size,
 node_index);

-assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
+assert(buf == nullptr || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
 "Requested at minimum %zu, desired %zu words, but got %zu at " PTR_FORMAT,
 required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

-if (buf != NULL) {
+if (buf != nullptr) {
 alloc_buf->set_buf(buf, actual_plab_size);

 HeapWord* const obj = alloc_buf->allocate(word_sz);
-assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
+assert(obj != nullptr, "PLAB should have been big enough, tried to allocate "
 "%zu requiring %zu PLAB size %zu",
 word_sz, required_in_plab, plab_word_size);
 return obj;
@@ -416,7 +416,7 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
 }
 // Try direct allocation.
 HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
-if (result != NULL) {
+if (result != nullptr) {
 plab_data->_direct_allocated += word_sz;
 plab_data->_num_direct_allocations++;
 }
@@ -432,7 +432,7 @@ void G1PLABAllocator::flush_and_retire_stats(uint num_workers) {
 G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
 for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
 PLAB* const buf = alloc_buffer(state, node_index);
-if (buf != NULL) {
+if (buf != nullptr) {
 buf->flush_and_retire_stats(stats);
 }
 }
@@ -458,7 +458,7 @@ size_t G1PLABAllocator::waste() const {
 for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
 for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
 PLAB* const buf = alloc_buffer(state, node_index);
-if (buf != NULL) {
+if (buf != nullptr) {
 result += buf->waste();
 }
 }
@@ -475,7 +475,7 @@ size_t G1PLABAllocator::undo_waste() const {
 for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
 for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
 PLAB* const buf = alloc_buffer(state, node_index);
-if (buf != NULL) {
+if (buf != nullptr) {
 result += buf->undo_waste();
 }
 }

@@ -203,7 +203,7 @@ public:
 size_t plab_size(G1HeapRegionAttr which) const;

 // Allocate word_sz words in dest, either directly into the regions or by
-// allocating a new PLAB. Returns the address of the allocated memory, NULL if
+// allocating a new PLAB. Returns the address of the allocated memory, null if
 // not successful. Plab_refill_failed indicates whether an attempt to refill the
 // PLAB failed or not.
 HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
@@ -212,7 +212,7 @@ public:
 uint node_index);

 // Allocate word_sz words in the PLAB of dest. Returns the address of the
-// allocated memory, NULL if not successful.
+// allocated memory, null if not successful.
 inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
 size_t word_sz,
 uint node_index);

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
 uint node_index = current_node_index();

 HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
-if (result != NULL) {
+if (result != nullptr) {
 return result;
 }

@@ -66,7 +66,7 @@ inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
 uint node_index = current_node_index();
 HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);

-assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
+assert(result != nullptr || mutator_alloc_region(node_index)->get() == nullptr,
 "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(node_index)->get()));
 return result;
 }
@@ -80,7 +80,7 @@ inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest, uint node_inde
 assert(dest.is_valid(),
 "Allocation buffer index out of bounds: %s", dest.get_type_str());
 assert(_dest_data[dest.type()]._alloc_buffer != nullptr,
-"Allocation buffer is NULL: %s", dest.get_type_str());
+"Allocation buffer is null: %s", dest.get_type_str());
 return alloc_buffer(dest.type(), node_index);
 }

@@ -117,7 +117,7 @@ inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
 bool* refill_failed,
 uint node_index) {
 HeapWord* const obj = plab_allocate(dest, word_sz, node_index);
-if (obj != NULL) {
+if (obj != nullptr) {
 return obj;
 }
 return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -81,9 +81,9 @@ void G1Arguments::initialize_verification_types() {
 char* save_ptr;

 char* token = strtok_r(type_list, delimiter, &save_ptr);
-while (token != NULL) {
+while (token != nullptr) {
 parse_verification_type(token);
-token = strtok_r(NULL, delimiter, &save_ptr);
+token = strtok_r(nullptr, delimiter, &save_ptr);
 }
 FREE_C_HEAP_ARRAY(char, type_list);
 }
@@ -167,7 +167,7 @@ void G1Arguments::initialize() {
 FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
 if (ParallelGCThreads == 0) {
 assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
-vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
+vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr);
 }

 // When dumping the CDS archive we want to reduce fragmentation by

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,7 @@ inline void G1BarrierSet::enqueue_preloaded_if_weak(DecoratorSet decorators, oop
 const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
 const bool needs_enqueue = (!peek && !on_strong_oop_ref);

-if (needs_enqueue && value != NULL) {
+if (needs_enqueue && value != nullptr) {
 enqueue_preloaded(value);
 }
 }

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,10 +27,10 @@
 #include "memory/padded.inline.hpp"

 G1BiasedMappedArrayBase::G1BiasedMappedArrayBase() :
-_alloc_base(NULL),
-_base(NULL),
+_alloc_base(nullptr),
+_base(nullptr),
 _length(0),
-_biased_base(NULL),
+_biased_base(nullptr),
 _bias(0),
 _shift_by(0) { }

@@ -47,19 +47,19 @@ address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t ele

 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
-guarantee(_base != NULL, "Array not initialized");
+guarantee(_base != nullptr, "Array not initialized");
 guarantee(index < length(), "Index out of bounds index: " SIZE_FORMAT " length: " SIZE_FORMAT, index, length());
 }

 void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
-guarantee(_biased_base != NULL, "Array not initialized");
+guarantee(_biased_base != nullptr, "Array not initialized");
 guarantee(biased_index >= bias() && biased_index < (bias() + length()),
 "Biased index out of bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT,
 biased_index, bias(), length());
 }

 void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
-guarantee(_biased_base != NULL, "Array not initialized");
+guarantee(_biased_base != nullptr, "Array not initialized");
 guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
 "Biased index out of inclusive bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT,
 biased_index, bias(), length());

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ protected:
// Initialize the members of this class. The biased start address of this array
// is the bias (in elements) multiplied by the element size.
void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
assert(base != NULL, "just checking");
assert(base != nullptr, "just checking");
assert(length > 0, "just checking");
assert(shift_by < sizeof(uintptr_t) * 8, "Shifting by %u, larger than word size?", shift_by);
_base = base;
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@
//////////////////////////////////////////////////////////////////////

G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(heap), _offset_array(NULL) {
_reserved(heap), _offset_array(nullptr) {

MemRegion bot_reserved = storage->reserved();

@ -186,7 +186,7 @@ void G1BlockOffsetTablePart::update_for_block_work(HeapWord* blk_start,
HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
size_t const index = _bot->index_for_raw(cur_card_boundary);

assert(blk_start != NULL && blk_end > blk_start,
assert(blk_start != nullptr && blk_end > blk_start,
"phantom block");
assert(blk_end > cur_card_boundary, "should be past cur_card_boundary");
assert(blk_start <= cur_card_boundary, "blk_start should be at or before cur_card_boundary");
@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -859,7 +859,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {

G1CardSetHashTableValue* table_entry = get_container(card_region);
if (table_entry == nullptr) {
st->print("NULL card set");
st->print("null card set");
return;
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class G1CardTableChangedListener : public G1MappingChangedListener {
private:
G1CardTable* _card_table;
public:
G1CardTableChangedListener() : _card_table(NULL) { }
G1CardTableChangedListener() : _card_table(nullptr) { }

void set_card_table(G1CardTable* card_table) { _card_table = card_table; }
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,7 @@ void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) {
nm->mark_as_maybe_on_stack();

BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
bs_nm->disarm(nm);
}
}
@ -101,7 +101,7 @@ void G1CodeBlobClosure::do_marking(nmethod* nm) {
nm->mark_as_maybe_on_stack();

BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
bs_nm->disarm(nm);
}

@ -125,7 +125,7 @@ public:

void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm == NULL) {
if (nm == nullptr) {
return;
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ class G1CodeBlobClosure : public CodeBlobClosure {
void do_oop_work(T* p);

public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(nullptr) {}

void do_oop(oop* o);
void do_oop(narrowOop* o);
@ -170,7 +170,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size,

HeapRegion* res = _hrm.allocate_free_region(type, node_index);

if (res == NULL && do_expand) {
if (res == nullptr && do_expand) {
// Currently, only attempts to allocate GC alloc regions set
// do_expand to true. So, we should only reach here during a
// safepoint.
@ -186,7 +186,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size,
// Given that expand_single_region() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty.
// In either case allocate_free_region() will check for NULL.
// In either case allocate_free_region() will check for null.
res = _hrm.allocate_free_region(type, node_index);
}
}
@ -279,7 +279,7 @@ HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
uint num_regions,
size_t word_size) {
assert(first_hr != NULL, "pre-condition");
assert(first_hr != nullptr, "pre-condition");
assert(is_humongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

@ -348,12 +348,12 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

// Policy: First try to allocate a humongous object in the free list.
HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
if (humongous_start == NULL) {
if (humongous_start == nullptr) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, expand the heap and allocate the humongous object.
humongous_start = _hrm.expand_and_allocate_humongous(obj_regions);
if (humongous_start != NULL) {
if (humongous_start != nullptr) {
// We managed to find a region by expanding the heap.
log_debug(gc, ergo, heap)("Heap expansion (humongous allocation request). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
@ -363,10 +363,10 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
}
}

HeapWord* result = NULL;
if (humongous_start != NULL) {
HeapWord* result = nullptr;
if (humongous_start != nullptr) {
result = humongous_obj_allocate_initialize_regions(humongous_start, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
assert(result != nullptr, "it should always return a valid result");

// A successful humongous object allocation changes the used space
// information of the old generation so we need to recalculate the
@ -415,8 +415,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
HeapWord* result = NULL;
// return null.
HeapWord* result = nullptr;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
uint gc_count_before;
@ -427,7 +427,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// Now that we have the lock, we first retry the allocation in case another
// thread changed the region while we were waiting to acquire the lock.
result = _allocator->attempt_allocation_locked(word_size);
if (result != NULL) {
if (result != nullptr) {
return result;
}

@ -438,7 +438,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@ -454,8 +454,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
if (result != nullptr) {
assert(succeeded, "only way to get back a non-null result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
return result;
@ -463,10 +463,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {

if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return NULL.
// point in trying to allocate further. We'll just return null.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
return nullptr;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
Thread::current()->name(), word_size);
@ -475,7 +475,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
return nullptr;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
@ -495,7 +495,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// iteration (after taking the Heap_lock).
size_t dummy = 0;
result = _allocator->attempt_allocation(word_size, word_size, &dummy);
if (result != NULL) {
if (result != nullptr) {
return result;
}

@ -508,7 +508,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
}

ShouldNotReachHere();
return NULL;
return nullptr;
}

bool G1CollectedHeap::check_archive_addresses(MemRegion range) {
@ -637,13 +637,13 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,

HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);

if (result == NULL) {
if (result == nullptr) {
*actual_word_size = desired_word_size;
result = attempt_allocation_slow(desired_word_size);
}

assert_heap_not_locked();
if (result != NULL) {
if (result != nullptr) {
assert(*actual_word_size != 0, "Actual size must have been set here");
dirty_young_block(result, *actual_word_size);
} else {
@ -684,8 +684,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
HeapWord* result = NULL;
// return null.
HeapWord* result = nullptr;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
uint gc_count_before;
@ -699,7 +699,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate(word_size);
if (result != NULL) {
if (result != nullptr) {
policy()->old_gen_alloc_tracker()->
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
@ -716,8 +716,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
if (result != nullptr) {
assert(succeeded, "only way to get back a non-null result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
@ -728,10 +728,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {

if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return NULL.
// point in trying to allocate further. We'll just return null.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
return nullptr;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
Thread::current()->name(), word_size);
@ -740,7 +740,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
return nullptr;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
@ -768,20 +768,20 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
}

ShouldNotReachHere();
return NULL;
return nullptr;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_mutator_alloc_region) {
assert_at_safepoint_on_vm_thread();
assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
"the current alloc region was unexpectedly found to be non-null");

if (!is_humongous(word_size)) {
return _allocator->attempt_allocation_locked(word_size);
} else {
HeapWord* result = humongous_obj_allocate(word_size);
if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
if (result != nullptr && policy()->need_to_start_conc_mark("STW humongous allocation")) {
collector_state()->set_initiate_conc_mark_if_possible(true);
}
return result;
@ -925,7 +925,7 @@ bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
soft_ref_policy()->should_clear_all_soft_refs();

G1FullGCMark gc_mark;
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause(), true);
G1FullCollector collector(this, do_clear_all_soft_refs, do_maximal_compaction, gc_mark.tracer());

collector.prepare_collection();
@ -981,7 +981,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
expect_null_mutator_alloc_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}

@ -990,7 +990,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
result = expand_and_allocate(word_size);
if (result != NULL) {
if (result != nullptr) {
return result;
}

@ -1008,7 +1008,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
maximal_compaction /* do_maximal_compaction */);
}

return NULL;
return nullptr;
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
@ -1023,7 +1023,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
false, /* expect_null_mutator_alloc_region */
succeeded);

if (result != NULL || !*succeeded) {
if (result != nullptr || !*succeeded) {
return result;
}

@ -1034,7 +1034,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
true, /* expect_null_mutator_alloc_region */
succeeded);

if (result != NULL || !*succeeded) {
if (result != nullptr || !*succeeded) {
return result;
}

@ -1045,7 +1045,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
true, /* expect_null_mutator_alloc_region */
succeeded);

if (result != NULL) {
if (result != nullptr) {
return result;
}

@ -1056,13 +1056,13 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
// appropriate.
return NULL;
return nullptr;
}

// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
// allocated block, or else null.

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
assert_at_safepoint_on_vm_thread();
@ -1080,7 +1080,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
return attempt_allocation_at_safepoint(word_size,
false /* expect_null_mutator_alloc_region */);
}
return NULL;
return nullptr;
}

bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers, double* expand_time_ms) {
@ -1101,7 +1101,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker
assert(regions_to_expand > 0, "Must expand by at least one region");

uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
if (expand_time_ms != NULL) {
if (expand_time_ms != nullptr) {
*expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
}

@ -1220,22 +1220,22 @@ public:

G1CollectedHeap::G1CollectedHeap() :
CollectedHeap(),
_service_thread(NULL),
_periodic_gc_task(NULL),
_free_arena_memory_task(NULL),
_workers(NULL),
_card_table(NULL),
_service_thread(nullptr),
_periodic_gc_task(nullptr),
_free_arena_memory_task(nullptr),
_workers(nullptr),
_card_table(nullptr),
_collection_pause_end(Ticks::now()),
_soft_ref_policy(),
_old_set("Old Region Set", new OldRegionSetChecker()),
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
_bot(NULL),
_bot(nullptr),
_listener(),
_numa(G1NUMA::create()),
_hrm(),
_allocator(NULL),
_allocator(nullptr),
_evac_failure_injector(),
_verifier(NULL),
_verifier(nullptr),
_summary_bytes_used(0),
_bytes_used_during_gc(0),
_survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
@ -1252,19 +1252,19 @@ G1CollectedHeap::G1CollectedHeap() :
_gc_timer_stw(new STWGCTimer()),
_gc_tracer_stw(new G1NewTracer()),
_policy(new G1Policy(_gc_timer_stw)),
_heap_sizing_policy(NULL),
_heap_sizing_policy(nullptr),
_collection_set(this, _policy),
_rem_set(NULL),
_rem_set(nullptr),
_card_set_config(),
_card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
_cm(NULL),
_cm_thread(NULL),
_cr(NULL),
_task_queues(NULL),
_ref_processor_stw(NULL),
_cm(nullptr),
_cm_thread(nullptr),
_cr(nullptr),
_task_queues(nullptr),
_ref_processor_stw(nullptr),
_is_alive_closure_stw(this),
_is_subject_to_discovery_stw(this),
_ref_processor_cm(NULL),
_ref_processor_cm(nullptr),
_is_alive_closure_cm(this),
_is_subject_to_discovery_cm(this),
_region_attr() {
@ -1294,7 +1294,7 @@ G1CollectedHeap::G1CollectedHeap() :

_gc_tracer_stw->initialize();

guarantee(_task_queues != NULL, "task_queues allocation failure.");
guarantee(_task_queues != nullptr, "task_queues allocation failure.");
}

G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
@ -1330,7 +1330,7 @@ jint G1CollectedHeap::initialize_concurrent_refinement() {

jint G1CollectedHeap::initialize_service_thread() {
_service_thread = new G1ServiceThread();
if (_service_thread->osthread() == NULL) {
if (_service_thread->osthread() == nullptr) {
vm_shutdown_during_initialization("Could not create G1ServiceThread");
return JNI_ENOMEM;
}
@ -1399,7 +1399,7 @@ jint G1CollectedHeap::initialize() {
HeapRegion::GrainBytes,
1,
mtJavaHeap);
if(heap_storage == NULL) {
if(heap_storage == nullptr) {
vm_shutdown_during_initialization("Could not initialize G1 heap");
return JNI_ERR;
}
@ -1461,7 +1461,7 @@ jint G1CollectedHeap::initialize() {
}

_workers = new WorkerThreads("GC Thread", ParallelGCThreads);
if (_workers == NULL) {
if (_workers == nullptr) {
return JNI_ENOMEM;
}
_workers->initialize_workers();
@ -2093,7 +2093,7 @@ void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl,

do {
uint region_idx = regions[cur_pos];
if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
if (hr_claimer == nullptr || hr_claimer->claim_region(region_idx)) {
HeapRegion* r = region_at(region_idx);
bool result = cl->do_heap_region(r);
guarantee(!result, "Must not cancel iteration");
@ -2254,7 +2254,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
void G1CollectedHeap::print_on_error(outputStream* st) const {
this->CollectedHeap::print_on_error(st);

if (_cm != NULL) {
if (_cm != nullptr) {
st->cr();
_cm->print_on_error(st);
}
@ -2374,8 +2374,8 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,

HeapWord* result = op.result();
bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();
assert(result == NULL || ret_succeeded,
"the result should be NULL if the VM did not succeed");
assert(result == nullptr || ret_succeeded,
"the result should be null if the VM did not succeed");
*succeeded = ret_succeeded;

assert_heap_not_locked();
@ -2597,7 +2597,7 @@ void G1CollectedHeap::complete_cleaning(bool class_unloading_occurred) {
}

bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
assert(obj != NULL, "must not be NULL");
assert(obj != nullptr, "must not be null");
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
// The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
// may falsely indicate that this is not the case here: however the collection set only
@ -2608,7 +2608,7 @@ bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
void G1CollectedHeap::make_pending_list_reachable() {
if (collector_state()->in_concurrent_start_gc()) {
oop pll_head = Universe::reference_pending_list();
if (pll_head != NULL) {
if (pll_head != nullptr) {
// Any valid worker id is fine here as we are in the VM thread and single-threaded.
_cm->mark_in_bitmap(0 /* worker_id */, pll_head);
}
@ -2622,7 +2622,7 @@ void G1CollectedHeap::set_humongous_stats(uint num_humongous_total, uint num_hum

bool G1CollectedHeap::should_sample_collection_set_candidates() const {
G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates();
return candidates != NULL && candidates->num_remaining() > 0;
return candidates != nullptr && candidates->num_remaining() > 0;
}

void G1CollectedHeap::set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats) {
@ -2654,7 +2654,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
hr->hr_clear(true /* clear_space */);
_policy->remset_tracker()->update_at_free(hr);

if (free_list != NULL) {
if (free_list != nullptr) {
free_list->add_ordered(hr);
}
}
@ -2677,7 +2677,7 @@ void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed,
}

void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
assert(list != NULL, "list can't be null");
assert(list != nullptr, "list can't be null");
if (!list->is_empty()) {
MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
_hrm.insert_list_into_free_list(list);
@ -2877,14 +2877,14 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
HeapRegionType::Eden,
false /* do_expand */,
node_index);
if (new_alloc_region != NULL) {
if (new_alloc_region != nullptr) {
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, !should_allocate);
_policy->remset_tracker()->update_at_allocate(new_alloc_region);
return new_alloc_region;
}
}
return NULL;
return nullptr;
}

void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
@ -2917,7 +2917,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
assert(FreeList_lock->owned_by_self(), "pre-condition");

if (!has_more_regions(dest)) {
return NULL;
return nullptr;
}

HeapRegionType type;
@ -2932,7 +2932,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
true /* do_expand */,
node_index);

if (new_alloc_region != NULL) {
if (new_alloc_region != nullptr) {
if (type.is_survivor()) {
new_alloc_region->set_survivor();
_survivor.add(new_alloc_region);
@ -2945,7 +2945,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
_hr_printer.alloc(new_alloc_region);
return new_alloc_region;
}
return NULL;
return nullptr;
}

void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
@ -2977,7 +2977,7 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
}
return _hrm.allocate_free_regions_starting_at(index, 1);
}
return NULL;
return nullptr;
}

void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, size_t obj_size) const {
@ -3042,13 +3042,13 @@ public:
};

void G1CollectedHeap::register_nmethod(nmethod* nm) {
guarantee(nm != NULL, "sanity");
guarantee(nm != nullptr, "sanity");
RegisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
}

void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
guarantee(nm != NULL, "sanity");
guarantee(nm != nullptr, "sanity");
UnregisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl, true);
}
@ -3075,7 +3075,7 @@ public:

void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
_g1h->register_nmethod(nm);
}
}
@ -352,10 +352,10 @@ private:
"should not be at a safepoint")); \
} while (0)

#define assert_at_safepoint_on_vm_thread() \
do { \
assert_at_safepoint(); \
assert(Thread::current_or_null() != NULL, "no current thread"); \
#define assert_at_safepoint_on_vm_thread() \
do { \
assert_at_safepoint(); \
assert(Thread::current_or_null() != nullptr, "no current thread"); \
assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
} while (0)

@ -404,7 +404,7 @@ private:
size_t word_size);

// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
// null if unsuccessful.
HeapWord* humongous_obj_allocate(size_t word_size);

// The following two methods, allocate_new_tlab() and
@ -427,7 +427,7 @@ private:
// retry the allocation.
//
// * If all allocation attempts fail, even after trying to schedule
// an evacuation pause, allocate_new_tlab() will return NULL,
// an evacuation pause, allocate_new_tlab() will return null,
// whereas mem_allocate() will attempt a heap expansion and/or
// schedule a Full GC.
//
@ -461,7 +461,7 @@ private:

// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
// specifies whether the mutator alloc region is expected to be NULL
// specifies whether the mutator alloc region is expected to be null
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_mutator_alloc_region);
@ -519,7 +519,7 @@ private:
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
// allocated block, or else null.
HeapWord* expand_and_allocate(size_t word_size);

void verify_numa_regions(const char* desc);
@ -574,7 +574,7 @@ public:
// Returns true if the heap was expanded by the requested amount;
// false otherwise.
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = NULL, double* expand_time_ms = NULL);
bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = nullptr, double* expand_time_ms = nullptr);
bool expand_single_region(uint node_index);

// Returns the PLAB statistics for a given destination.
@ -678,7 +678,7 @@ public:

// Frees a region by resetting its metadata and adding it to the free list
// passed as a parameter (this is usually a local list which will be appended
// to the master free list later or NULL if free list management is handled
// to the master free list later or null if free list management is handled
// in another way).
// Callers must ensure they are the only one calling free on the given region
// at the same time.
@ -1118,7 +1118,7 @@ public:
// The variant with the HeapRegionClaimer guarantees that the closure will be
// applied to a particular region exactly once.
void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
collection_set_iterate_increment_from(blk, NULL, worker_id);
collection_set_iterate_increment_from(blk, nullptr, worker_id);
}
void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
// Iterate over the array of region indexes, uint regions[length], applying
@ -1130,11 +1130,11 @@ public:
size_t length,
uint worker_id) const;

// Returns the HeapRegion that contains addr. addr must not be nullptr.
// Returns the HeapRegion that contains addr. addr must not be null.
inline HeapRegion* heap_region_containing(const void* addr) const;

// Returns the HeapRegion that contains addr, or nullptr if that is an uncommitted
// region. addr must not be nullptr.
// Returns the HeapRegion that contains addr, or null if that is an uncommitted
// region. addr must not be null.
inline HeapRegion* heap_region_containing_or_null(const void* addr) const;

// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
@ -1226,7 +1226,7 @@ public:
// Determine if an object is dead, given only the object itself.
// This will find the region to which the object belongs and
// then call the region version of the same function.
// If obj is NULL it is not dead.
// If obj is null it is not dead.
inline bool is_obj_dead(const oop obj) const;

inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
@ -86,7 +86,7 @@ G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
return &_old_evac_stats;
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
return nullptr; // Keep some compilers happy
}
}

@ -104,7 +104,7 @@ inline size_t G1CollectedHeap::clamp_plab_size(size_t value) const {
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
// Return the region with the given index, or null if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

template <typename Func>
@ -230,15 +230,15 @@ void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
}

inline bool G1CollectedHeap::is_in_young(const oop obj) const {
if (obj == NULL) {
if (obj == nullptr) {
return false;
}
return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
assert(obj != NULL, "");
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary NULL check
assert(obj != nullptr, "");
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}

inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
@ -251,7 +251,7 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) co
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
if (obj == NULL) {
if (obj == nullptr) {
return false;
}
return is_obj_dead(obj, heap_region_containing(obj));
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,11 +50,11 @@ G1GCPhaseTimes* G1CollectionSet::phase_times() {
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
_g1h(g1h),
_policy(policy),
_candidates(NULL),
_candidates(nullptr),
_eden_region_length(0),
_survivor_region_length(0),
_old_region_length(0),
_collection_set_regions(NULL),
_collection_set_regions(nullptr),
_collection_set_cur_length(0),
_collection_set_max_length(0),
_num_optional_regions(0),
@ -83,7 +83,7 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
}

void G1CollectionSet::initialize(uint max_region_length) {
guarantee(_collection_set_regions == NULL, "Must only initialize once.");
guarantee(_collection_set_regions == nullptr, "Must only initialize once.");
_collection_set_max_length = max_region_length;
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}
@ -94,7 +94,7 @@ void G1CollectionSet::free_optional_regions() {

void G1CollectionSet::clear_candidates() {
delete _candidates;
_candidates = NULL;
_candidates = nullptr;
}

// Add the heap region at the head of the non-incremental collection set
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -211,7 +211,7 @@ public:
void clear_candidates();

void set_candidates(G1CollectionSetCandidates* candidates) {
assert(_candidates == NULL, "Trying to replace collection set candidates.");
assert(_candidates == nullptr, "Trying to replace collection set candidates.");
_candidates = candidates;
}
G1CollectionSetCandidates* candidates() { return _candidates; }
@ -45,7 +45,7 @@ void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted)
uint cur_idx = _num_regions - i - 1;
reclaimable += at(cur_idx)->reclaimable_bytes();
// Make sure we crash if we access it.
_regions[cur_idx] = NULL;
_regions[cur_idx] = nullptr;
}

assert(reclaimable == wasted, "Recalculated reclaimable inconsistent");
@ -79,16 +79,16 @@ void G1CollectionSetCandidates::verify() const {
guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
uint idx = _front_idx;
size_t sum_of_reclaimable_bytes = 0;
HeapRegion *prev = NULL;
HeapRegion *prev = nullptr;
for (; idx < _num_regions; idx++) {
HeapRegion *cur = _regions[idx];
guarantee(cur != NULL, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx);
guarantee(cur != nullptr, "Regions after _front_idx %u cannot be null but %u is", _front_idx, idx);
// The first disjunction filters out regions with objects that were explicitly
// pinned after being added to the collection set candidates.
guarantee(cur->is_pinned() ||
G1CollectionSetChooser::should_add(cur),
"Region %u should be eligible for addition.", cur->hrm_index());
if (prev != NULL) {
if (prev != nullptr) {
guarantee(prev->gc_efficiency() >= cur->gc_efficiency(),
"GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f",
prev->hrm_index(), prev->gc_efficiency(), cur->hrm_index(), cur->gc_efficiency());
@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,10 +66,10 @@ public:
uint cur_idx() const { return _front_idx; }

HeapRegion* at(uint idx) const {
HeapRegion* res = NULL;
HeapRegion* res = nullptr;
if (idx < _num_regions) {
res = _regions[idx];
assert(res != NULL, "Unexpected NULL HeapRegion at index %u", idx);
assert(res != nullptr, "Unexpected null HeapRegion at index %u", idx);
}
return res;
}
@ -39,14 +39,14 @@
// a lot of live objects, not the ones with just a lot of live objects if we
// ordered according to the amount of reclaimable bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
// Make sure that NULL entries are moved to the end.
if (hr1 == NULL) {
if (hr2 == NULL) {
// Make sure that null entries are moved to the end.
if (hr1 == nullptr) {
if (hr2 == nullptr) {
return 0;
} else {
return 1;
}
} else if (hr2 == NULL) {
} else if (hr2 == nullptr) {
return -1;
}

@ -74,7 +74,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Work area for building the set of collection set candidates. Contains references
// to heap regions with their GC efficiencies calculated. To reduce contention
// on claiming array elements, worker threads claim parts of this array in chunks;
// Array elements may be NULL as threads might not get enough regions to fill
// Array elements may be null as threads might not get enough regions to fill
// up their chunks completely.
// Final sorting will remove them.
class G1BuildCandidateArray : public StackObj {
@ -102,7 +102,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
_data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
_cur_claim_idx(0) {
for (uint i = 0; i < _max_size; i++) {
_data[i] = NULL;
_data[i] = nullptr;
}
}

@ -123,7 +123,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Set element in array.
void set(uint idx, HeapRegion* hr) {
assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
assert(_data[idx] == NULL, "Value must not have been set.");
assert(_data[idx] == nullptr, "Value must not have been set.");
_data[idx] = hr;
}

@ -132,11 +132,11 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
assert(_data[i] == NULL, "must be");
assert(_data[i] == nullptr, "must be");
}
QuickSort::sort(_data, _cur_claim_idx, order_regions, true);
for (uint i = num_regions; i < _max_size; i++) {
assert(_data[i] == NULL, "must be");
assert(_data[i] == nullptr, "must be");
}
for (uint i = 0; i < num_regions; i++) {
dest[i] = _data[i];
@ -97,7 +97,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {

G1CMMarkStack::G1CMMarkStack() :
_max_chunk_capacity(0),
_base(NULL),
_base(nullptr),
_chunk_capacity(0) {
set_empty();
}
@ -109,12 +109,12 @@ bool G1CMMarkStack::resize(size_t new_capacity) {

TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

if (new_base == NULL) {
if (new_base == nullptr) {
log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
return false;
}
// Release old mapping.
if (_base != NULL) {
if (_base != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
}

@ -167,7 +167,7 @@ void G1CMMarkStack::expand() {
}

G1CMMarkStack::~G1CMMarkStack() {
if (_base != NULL) {
if (_base != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
}
}
@ -190,7 +190,7 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
TaskQueueEntryChunk* result = *list;
if (result != NULL) {
if (result != nullptr) {
*list = (*list)->next;
}
return result;
@ -199,7 +199,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQu
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
if (result != NULL) {
if (result != nullptr) {
_chunks_in_chunk_list--;
}
return result;
@ -215,16 +215,16 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
// Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
// wraparound of _hwm.
if (_hwm >= _chunk_capacity) {
return NULL;
return nullptr;
}

size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
if (cur_idx >= _chunk_capacity) {
return NULL;
return nullptr;
}

TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
result->next = NULL;
result->next = nullptr;
return result;
}

@ -232,11 +232,11 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
// Get a new chunk.
TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

if (new_chunk == NULL) {
if (new_chunk == nullptr) {
// Did not get a chunk from the free list. Allocate from backing memory.
new_chunk = allocate_new_chunk();

if (new_chunk == NULL) {
if (new_chunk == nullptr) {
return false;
}
}
@ -251,7 +251,7 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

if (cur == NULL) {
if (cur == nullptr) {
return false;
}

@ -264,8 +264,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
_hwm = 0;
_chunk_list = NULL;
_free_list = NULL;
_chunk_list = nullptr;
_free_list = nullptr;
}

G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
@ -288,7 +288,7 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
_root_regions[idx].set_start(start);
_root_regions[idx].set_end(end);
@ -305,20 +305,20 @@ void G1CMRootMemRegions::prepare_for_scan() {

const MemRegion* G1CMRootMemRegions::claim_next() {
if (_should_abort) {
// If someone has set the should_abort flag, we return NULL to
// If someone has set the should_abort flag, we return null to
// force the caller to bail out of their loop.
return NULL;
return nullptr;
}

if (_claimed_root_regions >= _num_root_regions) {
return NULL;
return nullptr;
}

size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
if (claimed_index < _num_root_regions) {
return &_root_regions[claimed_index];
}
return NULL;
return nullptr;
}

uint G1CMRootMemRegions::num_root_regions() const {
@ -404,9 +404,9 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_cleanup_times(),
_total_cleanup_time(0.0),

_accum_task_vtime(NULL),
_accum_task_vtime(nullptr),

_concurrent_workers(NULL),
_concurrent_workers(nullptr),
_num_concurrent_workers(0),
_max_concurrent_workers(0),

@ -414,13 +414,13 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_reserved_regions(), mtGC)),
_needs_remembered_set_rebuild(false)
{
assert(CGC_lock != NULL, "CGC_lock must be initialized");
assert(CGC_lock != nullptr, "CGC_lock must be initialized");

_mark_bitmap.initialize(g1h->reserved(), bitmap_storage);

// Create & start ConcurrentMark thread.
_cm_thread = new G1ConcurrentMarkThread(this);
if (_cm_thread->osthread() == NULL) {
if (_cm_thread->osthread() == nullptr) {
vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
}

@ -468,7 +468,7 @@ void G1ConcurrentMark::reset() {

uint max_reserved_regions = _g1h->max_reserved_regions();
for (uint i = 0; i < max_reserved_regions; i++) {
_top_at_rebuild_starts[i] = NULL;
_top_at_rebuild_starts[i] = nullptr;
_region_mark_stats[i].clear();
}

@ -480,7 +480,7 @@ void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
for (uint j = 0; j < _max_num_tasks; ++j) {
_tasks[j]->clear_mark_stats_cache(region_idx);
}
_top_at_rebuild_starts[region_idx] = NULL;
_top_at_rebuild_starts[region_idx] = nullptr;
_region_mark_stats[region_idx].clear();
}

@ -953,7 +953,7 @@ public:
void work(uint worker_id) {
G1CMRootMemRegions* root_regions = _cm->root_regions();
const MemRegion* region = root_regions->claim_next();
while (region != NULL) {
while (region != nullptr) {
_cm->scan_root_region(region, worker_id);
region = root_regions->claim_next();
}
@ -1349,7 +1349,7 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
_freed_bytes += hr->used();
hr->set_containing_set(NULL);
hr->set_containing_set(nullptr);
if (hr->is_humongous()) {
_humongous_regions_removed++;
_g1h->free_humongous_region(hr, _local_cleanup_list);
@ -1721,7 +1721,7 @@ public:
G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }

bool do_object_b(oop obj) {
return obj != NULL &&
return obj != nullptr &&
(!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj));
}
};
@ -1867,7 +1867,7 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
// Make sure that the reads below do not float before loading curr_region.
OrderAccess::loadload();
// Above heap_region_containing may return NULL as we always scan claim
// Above heap_region_containing may return null as we always scan claim
// until the end of the heap. In this case, just jump to the next region.
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + HeapRegion::GrainWords;

@ -1887,7 +1887,7 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
} else {
assert(limit == bottom,
"the region limit should be at bottom");
// we return NULL and the caller should try calling
// We return null and the caller should try calling
// claim_region() again.
return nullptr;
}
@ -1947,7 +1947,7 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
// Verify the global finger
HeapWord* global_finger = finger();
if (global_finger != nullptr && global_finger < _heap.end()) {
// Since we always iterate over all regions, we might get a nullptr HeapRegion
// Since we always iterate over all regions, we might get a null HeapRegion
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_or_null(global_finger);
guarantee(global_hr == nullptr || global_finger == global_hr->bottom(),
@ -2080,7 +2080,7 @@ void G1ConcurrentMark::print_on_error(outputStream* st) const {

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
ReferenceProcessor* result = g1h->ref_processor_cm();
assert(result != NULL, "CM reference processor should not be NULL");
assert(result != nullptr, "CM reference processor should not be null");
return result;
}

@ -2091,8 +2091,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
assert(hr != NULL,
"claim_region() should have filtered out NULL regions");
assert(hr != nullptr,
"claim_region() should have filtered out null regions");
_curr_region = hr;
_finger = hr->bottom();
update_region_limit();
@ -2131,29 +2131,29 @@ void G1CMTask::update_region_limit() {
|
||||
}
|
||||
|
||||
void G1CMTask::giveup_current_region() {
|
||||
assert(_curr_region != NULL, "invariant");
|
||||
assert(_curr_region != nullptr, "invariant");
|
||||
clear_region_fields();
|
||||
}
|
||||
|
||||
void G1CMTask::clear_region_fields() {
|
||||
// Values for these three fields that indicate that we're not
|
||||
// holding on to a region.
|
||||
_curr_region = NULL;
|
||||
_finger = NULL;
|
||||
_region_limit = NULL;
|
||||
_curr_region = nullptr;
|
||||
_finger = nullptr;
|
||||
_region_limit = nullptr;
|
||||
}
|
||||
|
||||
void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
|
||||
if (cm_oop_closure == NULL) {
|
||||
assert(_cm_oop_closure != NULL, "invariant");
|
||||
if (cm_oop_closure == nullptr) {
|
||||
assert(_cm_oop_closure != nullptr, "invariant");
|
||||
} else {
|
||||
assert(_cm_oop_closure == NULL, "invariant");
|
||||
assert(_cm_oop_closure == nullptr, "invariant");
|
||||
}
|
||||
_cm_oop_closure = cm_oop_closure;
|
||||
}
|
||||
|
||||
void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
|
||||
guarantee(mark_bitmap != NULL, "invariant");
|
||||
guarantee(mark_bitmap != nullptr, "invariant");
|
||||
_mark_bitmap = mark_bitmap;
|
||||
clear_region_fields();
|
||||
|
||||
@ -2604,10 +2604,10 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
drain_global_stack(true);
|
||||
|
||||
do {
|
||||
if (!has_aborted() && _curr_region != NULL) {
|
||||
if (!has_aborted() && _curr_region != nullptr) {
|
||||
// This means that we're already holding on to a region.
|
||||
assert(_finger != NULL, "if region is not NULL, then the finger "
|
||||
"should not be NULL either");
|
||||
assert(_finger != nullptr, "if region is not null, then the finger "
|
||||
"should not be null either");
|
||||
|
||||
// We might have restarted this task after an evacuation pause
|
||||
// which might have evacuated the region we're holding on to
|
||||
@ -2656,7 +2656,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
// do_bit() method we move the _finger to point to the
|
||||
// object currently being looked at. So, if we bail out, we
|
||||
// have definitely set _finger to something non-null.
|
||||
assert(_finger != NULL, "invariant");
|
||||
assert(_finger != nullptr, "invariant");
|
||||
|
||||
// Region iteration was actually aborted. So now _finger
|
||||
// points to the address of the object we last scanned. If we
|
||||
@ -2682,18 +2682,18 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
drain_global_stack(true);
|
||||
|
||||
// Read the note on the claim_region() method on why it might
|
||||
// return NULL with potentially more regions available for
|
||||
// return null with potentially more regions available for
|
||||
// claiming and why we have to check out_of_regions() to determine
|
||||
// whether we're done or not.
|
||||
while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
|
||||
while (!has_aborted() && _curr_region == nullptr && !_cm->out_of_regions()) {
|
||||
// We are going to try to claim a new region. We should have
|
||||
// given up on the previous one.
|
||||
// Separated the asserts so that we know which one fires.
|
||||
assert(_curr_region == NULL, "invariant");
|
||||
assert(_finger == NULL, "invariant");
|
||||
assert(_region_limit == NULL, "invariant");
|
||||
assert(_curr_region == nullptr, "invariant");
|
||||
assert(_finger == nullptr, "invariant");
|
||||
assert(_region_limit == nullptr, "invariant");
|
||||
HeapRegion* claimed_region = _cm->claim_region(_worker_id);
|
||||
if (claimed_region != NULL) {
|
||||
if (claimed_region != nullptr) {
|
||||
// Yes, we managed to claim one
|
||||
setup_for_region(claimed_region);
|
||||
assert(_curr_region == claimed_region, "invariant");
|
||||
@ -2706,11 +2706,11 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
abort_marking_if_regular_check_fail();
|
||||
}
|
||||
|
||||
if (!has_aborted() && _curr_region == NULL) {
|
||||
if (!has_aborted() && _curr_region == nullptr) {
|
||||
assert(_cm->out_of_regions(),
|
||||
"at this point we should be out of regions");
|
||||
}
|
||||
} while ( _curr_region != NULL && !has_aborted());
|
||||
} while ( _curr_region != nullptr && !has_aborted());
|
||||
|
||||
if (!has_aborted()) {
|
||||
// We cannot check whether the global stack is empty, since other
|
||||
@ -2792,7 +2792,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
// Mainly for debugging purposes to make sure that a pointer to the
|
||||
// closure which was statically allocated in this frame doesn't
|
||||
// escape it by accident.
|
||||
set_cm_oop_closure(NULL);
|
||||
set_cm_oop_closure(nullptr);
|
||||
double end_time_ms = os::elapsedVTime() * 1000.0;
|
||||
double elapsed_time_ms = end_time_ms - _start_time_ms;
|
||||
// Update the step history.
|
||||
@ -2864,16 +2864,16 @@ G1CMTask::G1CMTask(uint worker_id,
|
||||
_worker_id(worker_id),
|
||||
_g1h(G1CollectedHeap::heap()),
|
||||
_cm(cm),
|
||||
_mark_bitmap(NULL),
|
||||
_mark_bitmap(nullptr),
|
||||
_task_queue(task_queue),
|
||||
_mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
|
||||
_calls(0),
|
||||
_time_target_ms(0.0),
|
||||
_start_time_ms(0.0),
|
||||
_cm_oop_closure(NULL),
|
||||
_curr_region(NULL),
|
||||
_finger(NULL),
|
||||
_region_limit(NULL),
|
||||
_cm_oop_closure(nullptr),
|
||||
_curr_region(nullptr),
|
||||
_finger(nullptr),
|
||||
_region_limit(nullptr),
|
||||
_words_scanned(0),
|
||||
_words_scanned_limit(0),
|
||||
_real_words_scanned_limit(0),
|
||||
@ -2889,7 +2889,7 @@ G1CMTask::G1CMTask(uint worker_id,
|
||||
_termination_start_time_ms(0.0),
|
||||
_marking_step_diff_ms()
|
||||
{
|
||||
guarantee(task_queue != NULL, "invariant");
|
||||
guarantee(task_queue != nullptr, "invariant");
|
||||
|
||||
_marking_step_diff_ms.add(0.5);
|
||||
}
|
||||
|
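For context on the set_cm_oop_closure() hunks above: a G1CMTask either has no oop closure installed (nullptr) or exactly one, and a closure allocated on the caller's stack must be cleared again before that frame is left, which is what the asserts encode. A minimal sketch of a guard enforcing that pairing (the ScopedCMOopClosure helper is hypothetical, not part of this change):

class ScopedCMOopClosure {
  G1CMTask* _task;
public:
  ScopedCMOopClosure(G1CMTask* task, G1CMOopClosure* cl) : _task(task) {
    _task->set_cm_oop_closure(cl);        // asserts no closure was installed before
  }
  ~ScopedCMOopClosure() {
    _task->set_cm_oop_closure(nullptr);   // asserts a closure was installed
  }
};
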
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -60,12 +60,12 @@ private:
static const uintptr_t ArraySliceBit = 1;

G1TaskQueueEntry(oop obj) : _holder(obj) {
assert(_holder != NULL, "Not allowed to set NULL task queue element");
assert(_holder != nullptr, "Not allowed to set null task queue element");
}
G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:

G1TaskQueueEntry() : _holder(NULL) { }
G1TaskQueueEntry() : _holder(nullptr) { }
// Trivially copyable, for use in GenericTaskQueue.

static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }

@ -83,7 +83,7 @@ public:

bool is_oop() const { return !is_array_slice(); }
bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
bool is_null() const { return _holder == NULL; }
bool is_null() const { return _holder == nullptr; }
};

typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;

@ -118,7 +118,7 @@ public:
// stack memory is split into evenly sized chunks of oops. Users can only
// add or remove entries on that basis.
// Chunks are filled in increasing address order. Not completely filled chunks
// have a NULL element as a terminating element.
// have a null element as a terminating element.
//
// Every chunk has a header containing a single pointer element used for memory
// management. This wastes some space, but is negligible (< .1% with current sizing).

@ -152,12 +152,12 @@ private:
char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

// Allocate a new chunk from the reserved memory, using the high water mark. Returns
// NULL if out of memory.
// null if out of memory.
TaskQueueEntryChunk* allocate_new_chunk();

// Atomically add the given chunk to the list.
void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns NULL if the
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);

@ -183,19 +183,19 @@ private:

// Pushes the given buffer containing at most EntriesPerChunk elements on the mark
// stack. If less than EntriesPerChunk elements are to be pushed, the array must
// be terminated with a NULL.
// be terminated with a null.
// Returns whether the buffer contents were successfully pushed to the global mark
// stack.
bool par_push_chunk(G1TaskQueueEntry* buffer);

// Pops a chunk from this mark stack, copying them into the given buffer. This
// chunk may contain up to EntriesPerChunk elements. If there are less, the last
// element in the array is a NULL pointer.
// element in the array is a null pointer.
bool par_pop_chunk(G1TaskQueueEntry* buffer);

// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == NULL; }
bool is_empty() const { return _chunk_list == nullptr; }

size_t capacity() const { return _chunk_capacity; }

@ -250,14 +250,14 @@ public:
// Reset the claiming / scanning of the root regions.
void prepare_for_scan();

// Forces get_next() to return NULL so that the iteration aborts early.
// Forces get_next() to return null so that the iteration aborts early.
void abort() { _should_abort = true; }

// Return true if the CM thread are actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }

// Claim the next root MemRegion to scan atomically, or return NULL if
// Claim the next root MemRegion to scan atomically, or return null if
// all have been claimed.
const MemRegion* claim_next();

@ -406,7 +406,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
TaskTerminator* terminator() { return &_terminator; }

// Claims the next available region to be scanned by a marking
// task/thread. It might return NULL if the next region is empty or
// task/thread. It might return null if the next region is empty or
// we have run out of regions. In the latter case, out_of_regions()
// determines whether we've really run out of regions or the task
// should call claim_region() again. This might seem a bit

@ -454,7 +454,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// Region statistics gathered during marking.
G1RegionMarkStats* _region_mark_stats;
// Top pointer for each region at the start of the rebuild remembered set process
// for regions which remembered sets need to be rebuilt. A NULL for a given region
// for regions which remembered sets need to be rebuilt. A null for a given region
// means that this region does not be scanned during the rebuilding remembered
// set phase at all.
HeapWord* volatile* _top_at_rebuild_starts;

@ -655,11 +655,11 @@ private:
// Oop closure used for iterations over oops
G1CMOopClosure* _cm_oop_closure;

// Region this task is scanning, NULL if we're not scanning any
// Region this task is scanning, null if we're not scanning any
HeapRegion* _curr_region;
// Local finger of this task, NULL if we're not scanning a region
// Local finger of this task, null if we're not scanning a region
HeapWord* _finger;
// Limit of the region this task is scanning, NULL if we're not scanning one
// Limit of the region this task is scanning, null if we're not scanning one
HeapWord* _region_limit;

// Number of words this task has scanned

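For context on the chunk contract documented above: a chunk transfers up to EntriesPerChunk entries, and a chunk that is not completely filled is terminated by a null entry. A simplified sketch of the consuming loop (the buffer, the mark_stack pointer and process() are illustrative assumptions; access specifiers and the exact pop interface differ in the sources):

G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
if (mark_stack->par_pop_chunk(buffer)) {
  for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
    if (buffer[i].is_null()) {
      break;              // terminating entry of a not completely filled chunk
    }
    process(buffer[i]);   // hypothetical per-entry work
  }
}
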
@ -43,8 +43,8 @@
inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
// Check whether the passed in object is null. During discovery the referent
// may be cleared between the initial check and being passed in here.
if (obj == NULL) {
// Return true to avoid discovery when the referent is NULL.
if (obj == nullptr) {
// Return true to avoid discovery when the referent is null.
return true;
}

@ -63,7 +63,7 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
// mutator may have changed the referent's value (i.e. cleared it) between the
// time the referent was determined to be potentially alive and calling this
// method.
if (obj == NULL) {
if (obj == nullptr) {
return false;
}
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));

@ -96,7 +96,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
size_t num_chunks = 0;

TaskQueueEntryChunk* cur = _chunk_list;
while (cur != NULL) {
while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

for (size_t i = 0; i < EntriesPerChunk; ++i) {

@ -142,13 +142,13 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
// local check will be more accurate and so result in fewer pushes,
// but may also be a little slower.
HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
if (_finger != NULL) {
if (_finger != nullptr) {
// We have a current region.

// Finger and region values are all NULL or all non-NULL. We
// Finger and region values are all null or all non-null. We
// use _finger to check since we immediately use its value.
assert(_curr_region != NULL, "invariant");
assert(_region_limit != NULL, "invariant");
assert(_curr_region != nullptr, "invariant");
assert(_region_limit != nullptr, "invariant");
assert(_region_limit <= global_finger, "invariant");

// True if obj is less than the local finger, or is between

@ -197,14 +197,14 @@ inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const {
inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_reserved_regions(), "Tried to access TARS for region %u out of bounds", region);
assert(_top_at_rebuild_starts[region] == NULL,
"TARS for region %u has already been set to " PTR_FORMAT " should be NULL",
assert(_top_at_rebuild_starts[region] == nullptr,
"TARS for region %u has already been set to " PTR_FORMAT " should be null",
region, p2i(_top_at_rebuild_starts[region]));
G1RemSetTrackingPolicy* tracker = _g1h->policy()->remset_tracker();
if (tracker->needs_scan_for_rebuild(r)) {
_top_at_rebuild_starts[region] = r->top();
} else {
// Leave TARS at NULL.
// Leave TARS at null.
}
}

@ -269,7 +269,7 @@ template <class T>
inline bool G1CMTask::deal_with_reference(T* p) {
increment_refs_reached();
oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
if (obj == NULL) {
if (obj == nullptr) {
return false;
}
return make_reference_grey(obj);

@ -280,7 +280,7 @@ inline void G1ConcurrentMark::raw_mark_in_bitmap(oop obj) {
}

bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const {
assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
assert(p != nullptr && oopDesc::is_oop(p), "expected an oop");
return _mark_bitmap.is_marked(cast_from_oop<HeapWord*>(p));
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -51,7 +51,7 @@ public:
class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
G1CMBitMap* _bm;
public:
G1CMBitMapMappingChangedListener() : _bm(NULL) {}
G1CMBitMapMappingChangedListener() : _bm(nullptr) {}

void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -54,7 +54,7 @@
// this address (live) objects need to be scanned for references
// that might need to be added to the remembered sets.
//
// Note that bottom <= parsable_bottom <= tars; if there is no tars (i.e. NULL),
// Note that bottom <= parsable_bottom <= tars; if there is no tars (i.e. null),
// obviously there can not be a parsable_bottom.
//
// We need to scrub and scan objects to rebuild remembered sets until parsable_bottom;

@ -99,7 +99,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// that there is some rebuild or scrubbing work.
//
// Based on the results of G1RemSetTrackingPolicy::needs_scan_for_rebuild(),
// the value may be changed to nullptr during rebuilding if the region has either:
// the value may be changed to null during rebuilding if the region has either:
// - been allocated after rebuild start, or
// - been eagerly reclaimed by a young collection (only humongous)
bool should_rebuild_or_scrub(HeapRegion* hr) const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -84,7 +84,7 @@ G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
}

jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint max_num_threads) {
assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
assert(cr != nullptr, "G1ConcurrentRefine must not be null");
_cr = cr;
_max_num_threads = max_num_threads;

@ -138,7 +138,7 @@ bool G1ConcurrentRefineThreadControl::activate(uint worker_id) {

void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
for (uint i = 0; i < _max_num_threads; i++) {
if (_threads[i] != NULL) {
if (_threads[i] != nullptr) {
tc->do_thread(_threads[i]);
}
}

@ -146,7 +146,7 @@ void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {

void G1ConcurrentRefineThreadControl::stop() {
for (uint i = 0; i < _max_num_threads; i++) {
if (_threads[i] != NULL) {
if (_threads[i] != nullptr) {
_threads[i]->stop();
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -155,7 +155,7 @@ public:
~G1ConcurrentRefine();

// Returns a G1ConcurrentRefine instance if succeeded to create/initialize the
// G1ConcurrentRefine instance. Otherwise, returns nullptr with error code.
// G1ConcurrentRefine instance. Otherwise, returns null with error code.
static G1ConcurrentRefine* create(G1Policy* policy, jint* ecode);

// Stop all the refinement threads.

@ -121,7 +121,7 @@ size_t G1DirtyCardQueueSet::num_cards() const {
}

void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
assert(cbn != NULL, "precondition");
assert(cbn != nullptr, "precondition");
// Increment _num_cards before adding to queue, so queue removal doesn't
// need to deal with _num_cards possibly going negative.
Atomic::add(&_num_cards, buffer_size() - cbn->index());

@ -134,11 +134,11 @@ void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {

// Thread-safe attempt to remove and return the first buffer from
// the _completed queue, using the NonblockingQueue::try_pop() underneath.
// It has a limitation that it may return NULL when there are objects
// It has a limitation that it may return null when there are objects
// in the queue if there is a concurrent push/append operation.
BufferNode* G1DirtyCardQueueSet::dequeue_completed_buffer() {
Thread* current_thread = Thread::current();
BufferNode* result = NULL;
BufferNode* result = nullptr;
while (true) {
// Use GlobalCounter critical section to avoid ABA problem.
// The release of a buffer to its allocator's free list uses

@ -155,10 +155,10 @@ BufferNode* G1DirtyCardQueueSet::dequeue_completed_buffer() {

BufferNode* G1DirtyCardQueueSet::get_completed_buffer() {
BufferNode* result = dequeue_completed_buffer();
if (result == NULL) { // Unlikely if no paused buffers.
if (result == nullptr) { // Unlikely if no paused buffers.
enqueue_previous_paused_buffers();
result = dequeue_completed_buffer();
if (result == NULL) return NULL;
if (result == nullptr) return nullptr;
}
Atomic::sub(&_num_cards, buffer_size() - result->index());
return result;

@ -179,14 +179,14 @@ void G1DirtyCardQueueSet::verify_num_cards() const {
#endif // ASSERT

G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
_head(NULL), _tail(NULL),
_head(nullptr), _tail(nullptr),
_safepoint_id(SafepointSynchronize::safepoint_id())
{}

#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() {
assert(Atomic::load(&_head) == NULL, "precondition");
assert(_tail == NULL, "precondition");
assert(Atomic::load(&_head) == nullptr, "precondition");
assert(_tail == nullptr, "precondition");
}
#endif // ASSERT

@ -199,8 +199,8 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
assert_not_at_safepoint();
assert(is_next(), "precondition");
BufferNode* old_head = Atomic::xchg(&_head, node);
if (old_head == NULL) {
assert(_tail == NULL, "invariant");
if (old_head == nullptr) {
assert(_tail == nullptr, "invariant");
_tail = node;
} else {
node->set_next(old_head);

@ -210,27 +210,27 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
BufferNode* head = Atomic::load(&_head);
BufferNode* tail = _tail;
Atomic::store(&_head, (BufferNode*)NULL);
_tail = NULL;
Atomic::store(&_head, (BufferNode*)nullptr);
_tail = nullptr;
return HeadTail(head, tail);
}

G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {}
G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(nullptr) {}

#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
assert(Atomic::load(&_plist) == NULL, "invariant");
assert(Atomic::load(&_plist) == nullptr, "invariant");
}
#endif // ASSERT

void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
assert_not_at_safepoint();
PausedList* plist = Atomic::load_acquire(&_plist);
if (plist == NULL) {
if (plist == nullptr) {
// Try to install a new next list.
plist = new PausedList();
PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist);
if (old_plist != NULL) {
PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)nullptr, plist);
if (old_plist != nullptr) {
// Some other thread installed a new next list. Use it instead.
delete plist;
plist = old_plist;

@ -248,10 +248,10 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous(
// deleted out from under us by a concurrent take_previous().
GlobalCounter::CriticalSection cs(Thread::current());
previous = Atomic::load_acquire(&_plist);
if ((previous == NULL) || // Nothing to take.
if ((previous == nullptr) || // Nothing to take.
previous->is_next() || // Not from a previous safepoint.
// Some other thread stole it.
(Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) {
(Atomic::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) {
return HeadTail();
}
}

@ -269,8 +269,8 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
assert_at_safepoint();
HeadTail result;
PausedList* plist = Atomic::load(&_plist);
if (plist != NULL) {
Atomic::store(&_plist, (PausedList*)NULL);
if (plist != nullptr) {
Atomic::store(&_plist, (PausedList*)nullptr);
result = plist->take();
delete plist;
}

@ -279,7 +279,7 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {

void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
assert_not_at_safepoint();
assert(node->next() == NULL, "precondition");
assert(node->next() == nullptr, "precondition");
// Ensure there aren't any paused buffers from a previous safepoint.
enqueue_previous_paused_buffers();
// Cards for paused buffers are included in count, to contribute to

@ -291,8 +291,8 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
}

void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) {
if (paused._head != NULL) {
assert(paused._tail != NULL, "invariant");
if (paused._head != nullptr) {
assert(paused._tail != nullptr, "invariant");
// Cards from paused buffers are already recorded in the queue count.
_completed.append(*paused._head, *paused._tail);
}

@ -311,10 +311,10 @@ void G1DirtyCardQueueSet::enqueue_all_paused_buffers() {
void G1DirtyCardQueueSet::abandon_completed_buffers() {
BufferNodeList list = take_all_completed_buffers();
BufferNode* buffers_to_delete = list._head;
while (buffers_to_delete != NULL) {
while (buffers_to_delete != nullptr) {
BufferNode* bn = buffers_to_delete;
buffers_to_delete = bn->next();
bn->set_next(NULL);
bn->set_next(nullptr);
deallocate_buffer(bn);
}
}

@ -324,7 +324,7 @@ void G1DirtyCardQueueSet::abandon_completed_buffers() {
void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
assert(allocator() == src->allocator(), "precondition");
const BufferNodeList from = src->take_all_completed_buffers();
if (from._head != NULL) {
if (from._head != nullptr) {
Atomic::add(&_num_cards, from._entry_count);
_completed.append(*from._head, *from._tail);
}

@ -504,7 +504,7 @@ void G1DirtyCardQueueSet::handle_completed_buffer(BufferNode* new_node,
}

BufferNode* node = get_completed_buffer();
if (node == NULL) return; // Didn't get a buffer to process.
if (node == nullptr) return; // Didn't get a buffer to process.

// Refine cards in buffer.

@ -523,7 +523,7 @@ bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id,
if (Atomic::load(&_num_cards) <= stop_at) return false;

BufferNode* node = get_completed_buffer();
if (node == NULL) return false; // Didn't get a buffer to process.
if (node == nullptr) return false; // Didn't get a buffer to process.

bool fully_processed = refine_buffer(node, worker_id, stats);
handle_refined_buffer(node, fully_processed);

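A note on the casts kept in the hunks above: calls such as Atomic::store(&_plist, (PausedList*)nullptr) and Atomic::cmpxchg(&_plist, previous, (PausedList*)nullptr) retain an explicit pointer cast so the value argument carries the intended pointer type rather than std::nullptr_t. The sketch below shows the kind of deduction problem such casts avoid in a plain template; it is not HotSpot's Atomic implementation, which has its own conversion machinery:

#include <atomic>

struct PausedList {};

template <typename T>
T* exchange_ptr(std::atomic<T*>* dest, T* new_value) {
  return dest->exchange(new_value);   // plain std::atomic used for illustration
}

void example(std::atomic<PausedList*>* plist) {
  // exchange_ptr(plist, nullptr);            // T* vs. std::nullptr_t: deduction fails
  exchange_ptr(plist, (PausedList*)nullptr);  // T deduced as PausedList
}
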
@ -73,7 +73,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
struct HeadTail {
BufferNode* _head;
BufferNode* _tail;
HeadTail() : _head(NULL), _tail(NULL) {}
HeadTail() : _head(nullptr), _tail(nullptr) {}
HeadTail(BufferNode* head, BufferNode* tail) : _head(head), _tail(tail) {}
};

@ -126,7 +126,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
};

// The most recently created list, which might be for either the next or
// a previous safepoint, or might be NULL if the next list hasn't been
// a previous safepoint, or might be null if the next list hasn't been
// created yet. We only need one list because of the requirement that
// threads calling add() must first ensure there are no paused buffers
// from a previous safepoint. There might be many list instances existing

@ -211,10 +211,10 @@ class G1DirtyCardQueueSet: public PtrQueueSet {

// Thread-safe attempt to remove and return the first buffer from
// the _completed queue.
// Returns NULL if the queue is empty, or if a concurrent push/append
// Returns null if the queue is empty, or if a concurrent push/append
// interferes. It uses GlobalCounter critical section to avoid ABA problem.
BufferNode* dequeue_completed_buffer();
// Remove and return a completed buffer from the list, or return NULL
// Remove and return a completed buffer from the list, or return null
// if none available.
BufferNode* get_completed_buffer();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -33,7 +33,7 @@

G1FreeIdSet::G1FreeIdSet(uint start, uint size) :
_sem(size), // counting semaphore for available ids
_next(NULL), // array of "next" indices
_next(nullptr), // array of "next" indices
_start(start), // first id value
_size(size), // number of available ids
_head_index_mask(0), // mask for extracting index from a _head value.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -31,7 +31,7 @@
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"

uintptr_t** G1FromCardCache::_cache = NULL;
uintptr_t** G1FromCardCache::_cache = nullptr;
uint G1FromCardCache::_max_reserved_regions = 0;
size_t G1FromCardCache::_static_mem_size = 0;
#ifdef ASSERT

@ -40,7 +40,7 @@ uint G1FromCardCache::_max_workers = 0;

void G1FromCardCache::initialize(uint max_reserved_regions) {
guarantee(max_reserved_regions > 0, "Heap size must be valid");
guarantee(_cache == NULL, "Should not call this multiple times");
guarantee(_cache == nullptr, "Should not call this multiple times");

_max_reserved_regions = max_reserved_regions;
#ifdef ASSERT

@ -53,7 +53,7 @@ class ReferenceProcessor;
class G1FullGCSubjectToDiscoveryClosure: public BoolObjectClosure {
public:
bool do_object_b(oop p) {
assert(p != NULL, "must be");
assert(p != nullptr, "must be");
return true;
}
};

@ -57,7 +57,7 @@ bool G1FullGCCompactionPoint::has_regions() {
}

bool G1FullGCCompactionPoint::is_initialized() {
return _current_region != NULL;
return _current_region != nullptr;
}

void G1FullGCCompactionPoint::initialize(HeapRegion* hr) {

@ -71,7 +71,7 @@ HeapRegion* G1FullGCCompactionPoint::current_region() {

HeapRegion* G1FullGCCompactionPoint::next_region() {
HeapRegion* next = *(++_compaction_region_iterator);
assert(next != NULL, "Must return valid region");
assert(next != nullptr, "Must return valid region");
return next;
}

@ -93,7 +93,7 @@ void G1FullGCCompactionPoint::switch_region() {
}

void G1FullGCCompactionPoint::forward(oop object, size_t size) {
assert(_current_region != NULL, "Must have been initialized");
assert(_current_region != nullptr, "Must have been initialized");

// Ensure the object fit in the current region.
while (!object_will_fit(size)) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -188,7 +188,7 @@ void G1GCPhaseTimes::reset() {
_cur_verify_after_time_ms = 0.0;

for (int i = 0; i < GCParPhasesSentinel; i++) {
if (_gc_par_phases[i] != NULL) {
if (_gc_par_phases[i] != nullptr) {
_gc_par_phases[i]->reset();
}
}

@ -203,10 +203,10 @@ void G1GCPhaseTimes::record_gc_pause_start() {
}

#define ASSERT_PHASE_UNINITIALIZED(phase) \
assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");
assert(_gc_par_phases[phase] == nullptr || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
if (_gc_par_phases[phase] == NULL) {
if (_gc_par_phases[phase] == nullptr) {
return 0.0;
}
double value = _gc_par_phases[phase]->get(worker);

@ -287,17 +287,17 @@ size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, u

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) const {
if (_gc_par_phases[phase] == NULL) {
if (_gc_par_phases[phase] == nullptr) {
return 0.0;
}
return _gc_par_phases[phase]->average() * 1000.0;
}

size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
if (_gc_par_phases[phase] == NULL) {
if (_gc_par_phases[phase] == nullptr) {
return 0;
}
assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
assert(_gc_par_phases[phase]->thread_work_items(index) != nullptr, "No sub count");
return _gc_par_phases[phase]->thread_work_items(index)->sum();
}

@ -314,7 +314,7 @@ void G1GCPhaseTimes::details(T* phase, uint indent_level) const {
void G1GCPhaseTimes::print_thread_work_items(WorkerDataArray<double>* phase, uint indent_level, outputStream* out) const {
for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
if (work_items != NULL) {
if (work_items != nullptr) {
out->sp((indent_level + 1) * 2);
work_items->print_summary_on(out, true);
details(work_items, indent_level + 1);

@ -602,13 +602,13 @@ void G1EvacPhaseWithTrimTimeTracker::stop() {

G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool allow_multiple_record) :
_start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _allow_multiple_record(allow_multiple_record) {
if (_phase_times != NULL) {
if (_phase_times != nullptr) {
_start_time = Ticks::now();
}
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
if (_phase_times != NULL) {
if (_phase_times != nullptr) {
if (_allow_multiple_record) {
_phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
} else {

@ -629,7 +629,7 @@ G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
}

G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
if (_phase_times != NULL) {
if (_phase_times != nullptr) {
// Explicitly stop the trim tracker since it's not yet destructed.
_trim_tracker.stop();
// Exclude trim time by increasing the start time.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -48,7 +48,7 @@ class G1HeapRegionTraceType : AllStatic {
case StartsHumongous: return "Starts Humongous";
case ContinuesHumongous: return "Continues Humongous";
case Old: return "Old";
default: ShouldNotReachHere(); return NULL;
default: ShouldNotReachHere(); return nullptr;
}
}
};

@ -35,8 +35,8 @@ G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) :
_old_length(g1_heap->old_regions_count()),
_humongous_length(g1_heap->humongous_regions_count()),
_meta_sizes(MetaspaceUtils::get_combined_statistics()),
_eden_length_per_node(NULL),
_survivor_length_per_node(NULL) {
_eden_length_per_node(nullptr),
_survivor_length_per_node(nullptr) {

uint node_count = G1NUMA::numa()->num_active_nodes();

@ -111,8 +111,8 @@ static void log_regions(const char* msg, size_t before_length, size_t after_leng

ls.print("%s regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
msg, before_length, after_length, capacity);
// Not NULL only if gc+heap+numa at Debug level is enabled.
if (before_per_node_length != NULL && after_per_node_length != NULL) {
// Not null only if gc+heap+numa at Debug level is enabled.
if (before_per_node_length != nullptr && after_per_node_length != nullptr) {
G1NUMA* numa = G1NUMA::numa();
uint num_nodes = numa->num_active_nodes();
const int* node_ids = numa->node_ids();

@ -126,7 +126,7 @@ class G1VerifyCodeRootOopClosure: public OopClosure {

public:
G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
_g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}
_g1h(g1h), _root_cl(root_cl), _nm(nullptr), _vo(vo), _failures(false) {}

void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }

@ -144,7 +144,7 @@ public:

void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
_oop_cl->set_nmethod(nm);
nm->oops_do(_oop_cl);
}

@ -191,7 +191,7 @@ public:

template <class T> void do_oop_work(T *p) {
oop obj = RawAccess<>::oop_load(p);
guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
guarantee(obj == nullptr || !_g1h->is_obj_dead_cond(obj, _vo),
"Dead object referenced by a not dead object");
}
};

@ -210,7 +210,7 @@ public:
}
void do_object(oop o) {
VerifyLivenessOopClosure isLive(_g1h, _vo);
assert(o != NULL, "Huh?");
assert(o != nullptr, "Huh?");
if (!_g1h->is_obj_dead_cond(o, _vo)) {
// If the object is alive according to the full gc mark,
// then verify that the marking information agrees.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -91,18 +91,18 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
_young_gc_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_conc_gc_memory_manager("G1 Concurrent GC", "end of concurrent GC pause"),
_eden_space_pool(NULL),
_survivor_space_pool(NULL),
_old_gen_pool(NULL),
_young_collection_counters(NULL),
_full_collection_counters(NULL),
_conc_collection_counters(NULL),
_young_gen_counters(NULL),
_old_gen_counters(NULL),
_old_space_counters(NULL),
_eden_space_counters(NULL),
_from_space_counters(NULL),
_to_space_counters(NULL),
_eden_space_pool(nullptr),
_survivor_space_pool(nullptr),
_old_gen_pool(nullptr),
_young_collection_counters(nullptr),
_full_collection_counters(nullptr),
_conc_collection_counters(nullptr),
_young_gen_counters(nullptr),
_old_gen_counters(nullptr),
_old_space_counters(nullptr),
_eden_space_counters(nullptr),
_from_space_counters(nullptr),
_to_space_counters(nullptr),

_overall_committed(0),
_overall_used(0),

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -29,7 +29,7 @@
#include "runtime/globals.hpp"
#include "runtime/os.hpp"

G1NUMA* G1NUMA::_inst = NULL;
G1NUMA* G1NUMA::_inst = nullptr;

size_t G1NUMA::region_size() const {
assert(_region_size > 0, "Heap region size is not yet set");

@ -44,7 +44,7 @@ size_t G1NUMA::page_size() const {
bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; }

G1NUMA* G1NUMA::create() {
guarantee(_inst == NULL, "Should be called once.");
guarantee(_inst == nullptr, "Should be called once.");
_inst = new G1NUMA();

// NUMA only supported on Linux.

@ -72,9 +72,9 @@ uint G1NUMA::index_of_node_id(int node_id) const {
}

G1NUMA::G1NUMA() :
_node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
_node_ids(NULL), _num_active_node_ids(0),
_region_size(0), _page_size(0), _stats(NULL) {
_node_id_to_index_map(nullptr), _len_node_id_to_index_map(0),
_node_ids(nullptr), _num_active_node_ids(0),
_region_size(0), _page_size(0), _stats(nullptr) {
}

void G1NUMA::initialize_without_numa() {

@ -232,7 +232,7 @@ uint G1NUMA::max_search_depth() const {
void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
uint allocated_node_index) {
if (_stats == NULL) {
if (_stats == nullptr) {
return;
}

@ -250,7 +250,7 @@ void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase,
void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase,
uint requested_node_index,
size_t* allocated_stat) {
if (_stats == NULL) {
if (_stats == nullptr) {
return;
}

@ -258,7 +258,7 @@ void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase,
}

void G1NUMA::print_statistics() const {
if (_stats == NULL) {
if (_stats == nullptr) {
return;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -64,7 +64,7 @@ void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result) const {
}
}

assert(result != NULL, "Invariant");
assert(result != nullptr, "Invariant");
result->_hit = hit;
result->_requested = requested;
}

@ -77,7 +77,7 @@ void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result, uint req_index) c
requested += _data[req_index][column];
}

assert(result != NULL, "Invariant");
assert(result != nullptr, "Invariant");
result->_hit = hit;
result->_requested = requested;
}

@ -112,7 +112,7 @@ size_t G1NUMAStats::NodeDataArray::get(uint req_index, uint alloc_index) {
}

void G1NUMAStats::NodeDataArray::copy(uint req_index, size_t* stat) {
assert(stat != NULL, "Invariant");
assert(stat != nullptr, "Invariant");

for (uint column = 0; column < _num_column; column++) {
_data[req_index][column] += stat[column];

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -33,7 +33,7 @@ G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1h, G1ParScanThreadState* pa
_g1h(g1h),
_par_scan_state(par_scan_state),
_worker_id(par_scan_state->worker_id()),
_scanned_cld(NULL),
_scanned_cld(nullptr),
_cm(_g1h->concurrent_mark())
{ }

@ -52,7 +52,7 @@ void G1CLDScanClosure::do_cld(ClassLoaderData* cld) {
// Clean modified oops since we're going to scavenge all the metadata.
cld->oops_do(_closure, ClassLoaderData::_claim_none, true /*clear_modified_oops*/);

_closure->set_scanned_cld(NULL);
_closure->set_scanned_cld(nullptr);

_closure->trim_queue_partially();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -148,7 +148,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {

HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();

assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
assert(to_rem_set != nullptr, "Need per-region 'into' remsets.");
if (to_rem_set->is_tracked()) {
to_rem_set->add_reference(p, _worker_id);
}

@ -232,7 +232,7 @@ void G1ParCopyClosure<barrier, should_mark>::do_oop_work(T* p) {
} else {
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
assert(forwardee != NULL, "forwardee should not be NULL");
assert(forwardee != nullptr, "forwardee should not be null");
RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);

if (barrier == G1BarrierCLD) {

@ -257,7 +257,7 @@ void G1ParCopyClosure<barrier, should_mark>::do_oop_work(T* p) {

template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
if (obj == NULL) {
if (obj == nullptr) {
return;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -48,7 +48,7 @@ class G1OopStarChunkedList : public CHeapObj<mtGC> {
inline void push(ChunkedList<T*, mtGC>** field, T* p);

public:
G1OopStarChunkedList() : _used_memory(0), _roots(NULL), _croots(NULL), _oops(NULL), _coops(NULL) {}
G1OopStarChunkedList() : _used_memory(0), _roots(nullptr), _croots(nullptr), _oops(nullptr), _coops(nullptr) {}
~G1OopStarChunkedList();

size_t used_memory() { return _used_memory; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -34,7 +34,7 @@
template <typename T>
inline void G1OopStarChunkedList::push(ChunkedList<T*, mtGC>** field, T* p) {
ChunkedList<T*, mtGC>* list = *field;
if (list == NULL) {
if (list == nullptr) {
*field = new ChunkedList<T*, mtGC>();
_used_memory += sizeof(ChunkedList<T*, mtGC>);
} else if (list->is_full()) {

@ -65,7 +65,7 @@ inline void G1OopStarChunkedList::push_oop(oop* p) {

template <typename T>
void G1OopStarChunkedList::delete_list(ChunkedList<T*, mtGC>* c) {
while (c != NULL) {
while (c != nullptr) {
ChunkedList<T*, mtGC>* next = c->next_used();
delete c;
c = next;

@ -75,7 +75,7 @@ void G1OopStarChunkedList::delete_list(ChunkedList<T*, mtGC>* c) {
template <typename T>
size_t G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
size_t result = 0;
for (ChunkedList<T*, mtGC>* c = head; c != NULL; c = c->next_used()) {
for (ChunkedList<T*, mtGC>* c = head; c != nullptr; c = c->next_used()) {
result += c->size();
for (size_t i = 0; i < c->size(); i++) {
T* p = c->at(i);

@ -35,7 +35,7 @@
#include "utilities/bitMap.inline.hpp"

G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
_low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
_low_boundary(nullptr), _high_boundary(nullptr), _tail_size(0), _page_size(0),
_committed(mtGC), _dirty(mtGC), _special(false) {
assert(!rs.executable(), "precondition");
initialize_with_page_size(rs, used_size, page_size);

@ -44,7 +44,7 @@ G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_s
void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");

vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
vmassert(_low_boundary == nullptr, "VirtualSpace already initialized");
vmassert(page_size > 0, "Page size must be non-zero.");

guarantee(is_aligned(rs.base(), page_size),

@ -76,8 +76,8 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
// This does not release memory it never reserved.
// Caller must release via rs.release();
_low_boundary = NULL;
_high_boundary = NULL;
_low_boundary = nullptr;
_high_boundary = nullptr;
_special = false;
_page_size = 0;
_tail_size = 0;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -111,7 +111,7 @@ class G1PageBasedVirtualSpace {
// Uncommit the given area of pages starting at start being size_in_pages large.
void uncommit(size_t start_page, size_t size_in_pages);

void pretouch(size_t start_page, size_t size_in_pages, WorkerThreads* pretouch_workers = NULL);
void pretouch(size_t start_page, size_t size_in_pages, WorkerThreads* pretouch_workers = nullptr);

// Initialize the given reserved space with the given base address and the size
// actually used.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -66,8 +66,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_task_queue(g1h->task_queue(worker_id)),
_rdc_local_qset(rdcqs),
_ct(g1h->card_table()),
_closures(NULL),
_plab_allocator(NULL),
_closures(nullptr),
_plab_allocator(nullptr),
_age_table(false),
_tenuring_threshold(g1h->policy()->tenuring_threshold()),
_scanner(g1h, this),

@ -76,8 +76,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
_stack_trim_lower_threshold(GCDrainStackTargetSize),
_trim_ticks(),
_surviving_young_words_base(NULL),
_surviving_young_words(NULL),
_surviving_young_words_base(nullptr),
_surviving_young_words(nullptr),
_surviving_words_length(young_cset_length + 1),
_old_gen_is_full(false),
_partial_objarray_chunk_size(ParGCArrayScanChunk),

@ -85,7 +85,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_string_dedup_requests(),
_max_num_optional_regions(optional_cset_length),
_numa(g1h->numa()),
_obj_alloc_stat(NULL),
_obj_alloc_stat(nullptr),
EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA)
_preserved_marks(preserved_marks),
_evacuation_failed_info(),

@ -148,7 +148,7 @@ size_t G1ParScanThreadState::lab_undo_waste_words() const {

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
assert(task != NULL, "invariant");
assert(task != nullptr, "invariant");
assert(UseCompressedOops, "sanity");
oop p = RawAccess<>::oop_load(task);
assert(_g1h->is_in_reserved(p),

@ -156,7 +156,7 @@ void G1ParScanThreadState::verify_task(narrowOop* task) const {
}

void G1ParScanThreadState::verify_task(oop* task) const {
assert(task != NULL, "invariant");
assert(task != nullptr, "invariant");
oop p = RawAccess<>::oop_load(task);
assert(_g1h->is_in_reserved(p),
"task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));

@ -184,7 +184,7 @@ void G1ParScanThreadState::verify_task(ScannerTask task) const {
template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
// Reference should not be NULL here as such are never pushed to the task queue.
// Reference should not be null here as such are never pushed to the task queue.
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

// Although we never intentionally push references outside of the collection

@ -346,7 +346,7 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
_tenuring_threshold = 0;
}

if (obj_ptr != NULL) {
if (obj_ptr != nullptr) {
dest->set_old();
} else {
// We just failed to allocate in old gen. The same idea as explained above

@ -358,7 +358,7 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
_old_gen_is_full = previous_plab_refill_failed;
assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
// no other space to try.
return NULL;
return nullptr;
}
}

@ -396,7 +396,7 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
size_t word_sz,
uint age,
uint node_index) {
HeapWord* obj_ptr = NULL;
HeapWord* obj_ptr = nullptr;
// Try slow-path allocation unless we're allocating old and old is already full.
if (!(dest_attr->is_old() && _old_gen_is_full)) {
bool plab_refill_failed = false;

@ -404,14 +404,14 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
word_sz,
&plab_refill_failed,
node_index);
if (obj_ptr == NULL) {
if (obj_ptr == nullptr) {
obj_ptr = allocate_in_next_plab(dest_attr,
word_sz,
plab_refill_failed,
node_index);
}
}
if (obj_ptr != NULL) {
|
||||
if (obj_ptr != nullptr) {
|
||||
update_numa_stats(node_index);
|
||||
if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
|
||||
// The events are checked individually as part of the actual commit
|
||||
@ -463,17 +463,17 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
|
||||
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
|
||||
|
||||
// PLAB allocations should succeed most of the time, so we'll
|
||||
// normally check against NULL once and that's it.
|
||||
if (obj_ptr == NULL) {
|
||||
// normally check against null once and that's it.
|
||||
if (obj_ptr == nullptr) {
|
||||
obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
|
||||
if (obj_ptr == NULL) {
|
||||
if (obj_ptr == nullptr) {
|
||||
// This will either forward-to-self, or detect that someone else has
|
||||
// installed a forwarding pointer.
|
||||
return handle_evacuation_failure_par(old, old_mark, word_sz);
|
||||
}
|
||||
}
|
||||
|
||||
assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
|
||||
assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
|
||||
assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
|
||||
|
||||
// Should this evacuation fail?
|
||||
@ -494,7 +494,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
|
||||
// examine its contents without other synchronization, since the contents
|
||||
// may not be up to date for them.
|
||||
const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
|
||||
if (forward_ptr == NULL) {
|
||||
if (forward_ptr == nullptr) {
|
||||
|
||||
{
|
||||
const uint young_index = from_region->young_index_in_cset();
|
||||
@ -563,7 +563,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
|
||||
|
||||
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
|
||||
assert(worker_id < _num_workers, "out of bounds access");
|
||||
if (_states[worker_id] == NULL) {
|
||||
if (_states[worker_id] == nullptr) {
|
||||
_states[worker_id] =
|
||||
new G1ParScanThreadState(_g1h, rdcqs(),
|
||||
_preserved_marks_set.get(worker_id),
|
||||
@ -621,7 +621,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
|
||||
assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
|
||||
|
||||
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
|
||||
if (forward_ptr == NULL) {
|
||||
if (forward_ptr == nullptr) {
|
||||
// Forward-to-self succeeded. We are the "owner" of the object.
|
||||
HeapRegion* r = _g1h->heap_region_containing(old);
|
||||
|
||||
@ -676,14 +676,14 @@ void G1ParScanThreadState::initialize_numa_stats() {
|
||||
}
|
||||
|
||||
void G1ParScanThreadState::flush_numa_stats() {
|
||||
if (_obj_alloc_stat != NULL) {
|
||||
if (_obj_alloc_stat != nullptr) {
|
||||
uint node_index = _numa->index_of_current_thread();
|
||||
_numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
|
||||
}
|
||||
}
|
||||
|
||||
void G1ParScanThreadState::update_numa_stats(uint node_index) {
|
||||
if (_obj_alloc_stat != NULL) {
|
||||
if (_obj_alloc_stat != nullptr) {
|
||||
_obj_alloc_stat[node_index]++;
|
||||
}
|
||||
}
|
||||
@ -705,7 +705,7 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
|
||||
_evac_failure_regions(evac_failure_regions) {
|
||||
_preserved_marks_set.init(num_workers);
|
||||
for (uint i = 0; i < num_workers; ++i) {
|
||||
_states[i] = NULL;
|
||||
_states[i] = nullptr;
|
||||
}
|
||||
memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -185,7 +185,7 @@ private:
|
||||
// Tries to allocate word_sz in the PLAB of the next "generation" after trying to
|
||||
// allocate into dest. Previous_plab_refill_failed indicates whether previous
|
||||
// PLAB refill for the original (source) object failed.
|
||||
// Returns a non-NULL pointer if successful, and updates dest if required.
|
||||
// Returns a non-null pointer if successful, and updates dest if required.
|
||||
// Also determines whether we should continue to try to allocate into the various
|
||||
// generations or just end trying to allocate.
|
||||
HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
|
||||
|
@ -73,10 +73,10 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
|
||||
_rs_length(0),
|
||||
_pending_cards_at_gc_start(0),
|
||||
_concurrent_start_to_mixed(),
|
||||
_collection_set(NULL),
|
||||
_g1h(NULL),
|
||||
_collection_set(nullptr),
|
||||
_g1h(nullptr),
|
||||
_phase_times_timer(gc_timer),
|
||||
_phase_times(NULL),
|
||||
_phase_times(nullptr),
|
||||
_mark_remark_start_sec(0),
|
||||
_mark_cleanup_start_sec(0),
|
||||
_tenuring_threshold(MaxTenuringThreshold),
|
||||
@ -517,7 +517,7 @@ double G1Policy::predict_survivor_regions_evac_time() const {
|
||||
G1GCPhaseTimes* G1Policy::phase_times() const {
|
||||
// Lazy allocation because it must follow initialization of all the
|
||||
// OopStorage objects by various other subsystems.
|
||||
if (_phase_times == NULL) {
|
||||
if (_phase_times == nullptr) {
|
||||
_phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
|
||||
}
|
||||
return _phase_times;
|
||||
@ -1035,7 +1035,7 @@ double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) co
|
||||
return 0.0;
|
||||
}
|
||||
size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
|
||||
if (bytes_to_copy != NULL) {
|
||||
if (bytes_to_copy != nullptr) {
|
||||
*bytes_to_copy = expected_bytes;
|
||||
}
|
||||
return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->in_young_only_phase());
|
||||
@ -1281,7 +1281,7 @@ class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
|
||||
};
|
||||
|
||||
void G1Policy::clear_collection_set_candidates() {
|
||||
if (_collection_set->candidates() == NULL) {
|
||||
if (_collection_set->candidates() == nullptr) {
|
||||
return;
|
||||
}
|
||||
// Clear remembered sets of remaining candidate regions and the actual candidate
|
||||
@ -1373,7 +1373,7 @@ void G1Policy::abort_time_to_mixed_tracking() {
|
||||
bool G1Policy::next_gc_should_be_mixed(const char* no_candidates_str) const {
|
||||
G1CollectionSetCandidates* candidates = _collection_set->candidates();
|
||||
|
||||
if (candidates == NULL || candidates->is_empty()) {
|
||||
if (candidates == nullptr || candidates->is_empty()) {
|
||||
if (no_candidates_str != nullptr) {
|
||||
log_debug(gc, ergo)("%s (candidate old regions not available)", no_candidates_str);
|
||||
}
|
||||
@ -1418,7 +1418,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c
|
||||
double time_remaining_ms,
|
||||
uint& num_initial_regions,
|
||||
uint& num_optional_regions) {
|
||||
assert(candidates != NULL, "Must be");
|
||||
assert(candidates != nullptr, "Must be");
|
||||
|
||||
num_initial_regions = 0;
|
||||
num_optional_regions = 0;
|
||||
@ -1441,7 +1441,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c
|
||||
min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
|
||||
|
||||
HeapRegion* hr = candidates->at(candidate_idx);
|
||||
while (hr != NULL) {
|
||||
while (hr != nullptr) {
|
||||
if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
|
||||
// Added maximum number of old regions to the CSet.
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
|
||||
@ -1482,7 +1482,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c
|
||||
}
|
||||
hr = candidates->at(++candidate_idx);
|
||||
}
|
||||
if (hr == NULL) {
|
||||
if (hr == nullptr) {
|
||||
log_debug(gc, ergo, cset)("Old candidate collection set empty.");
|
||||
}
|
||||
|
||||
@ -1509,7 +1509,7 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat
|
||||
|
||||
HeapRegion* r = candidates->at(candidate_idx);
|
||||
while (num_optional_regions < max_optional_regions) {
|
||||
assert(r != NULL, "Region must exist");
|
||||
assert(r != nullptr, "Region must exist");
|
||||
double prediction_ms = predict_region_total_time_ms(r, false);
|
||||
|
||||
if (prediction_ms > time_remaining_ms) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -154,7 +154,7 @@ public:
|
||||
// Predict other time for count young regions.
|
||||
double predict_young_region_other_time_ms(uint count) const;
|
||||
// Predict copying live data time for count eden regions. Return the predict bytes if
|
||||
// bytes_to_copy is non-nullptr.
|
||||
// bytes_to_copy is non-null.
|
||||
double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = nullptr) const;
|
||||
// Total time for a region is handling remembered sets (as a single unit), copying its live data
|
||||
// and other time.
|
||||
@ -313,7 +313,7 @@ public:
|
||||
|
||||
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
|
||||
|
||||
bool concurrent_operation_is_full_mark(const char* msg = NULL);
|
||||
bool concurrent_operation_is_full_mark(const char* msg = nullptr);
|
||||
|
||||
bool about_to_start_mixed_phase() const;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,8 +39,8 @@ G1RedirtyCardsLocalQueueSet::G1RedirtyCardsLocalQueueSet(G1RedirtyCardsQueueSet*
|
||||
|
||||
#ifdef ASSERT
|
||||
G1RedirtyCardsLocalQueueSet::~G1RedirtyCardsLocalQueueSet() {
|
||||
assert(_buffers._head == NULL, "unflushed qset");
|
||||
assert(_buffers._tail == NULL, "invariant");
|
||||
assert(_buffers._head == nullptr, "unflushed qset");
|
||||
assert(_buffers._tail == nullptr, "invariant");
|
||||
assert(_buffers._entry_count == 0, "invariant");
|
||||
}
|
||||
#endif // ASSERT
|
||||
@ -49,7 +49,7 @@ void G1RedirtyCardsLocalQueueSet::enqueue_completed_buffer(BufferNode* node) {
|
||||
_buffers._entry_count += buffer_size() - node->index();
|
||||
node->set_next(_buffers._head);
|
||||
_buffers._head = node;
|
||||
if (_buffers._tail == NULL) {
|
||||
if (_buffers._tail == nullptr) {
|
||||
_buffers._tail = node;
|
||||
}
|
||||
}
|
||||
@ -88,7 +88,7 @@ G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet(BufferNode::Allocator* allocator)
|
||||
PtrQueueSet(allocator),
|
||||
_list(),
|
||||
_entry_count(0),
|
||||
_tail(NULL)
|
||||
_tail(nullptr)
|
||||
DEBUG_ONLY(COMMA _collecting(true))
|
||||
{}
|
||||
|
||||
@ -99,7 +99,7 @@ G1RedirtyCardsQueueSet::~G1RedirtyCardsQueueSet() {
|
||||
#ifdef ASSERT
|
||||
void G1RedirtyCardsQueueSet::verify_empty() const {
|
||||
assert(_list.empty(), "precondition");
|
||||
assert(_tail == NULL, "invariant");
|
||||
assert(_tail == nullptr, "invariant");
|
||||
assert(_entry_count == 0, "invariant");
|
||||
}
|
||||
#endif // ASSERT
|
||||
@ -112,7 +112,7 @@ BufferNode* G1RedirtyCardsQueueSet::all_completed_buffers() const {
|
||||
BufferNodeList G1RedirtyCardsQueueSet::take_all_completed_buffers() {
|
||||
DEBUG_ONLY(_collecting = false;)
|
||||
BufferNodeList result(_list.pop_all(), _tail, _entry_count);
|
||||
_tail = NULL;
|
||||
_tail = nullptr;
|
||||
_entry_count = 0;
|
||||
DEBUG_ONLY(_collecting = true;)
|
||||
return result;
|
||||
@ -120,10 +120,10 @@ BufferNodeList G1RedirtyCardsQueueSet::take_all_completed_buffers() {
|
||||
|
||||
void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) {
|
||||
// Node is the tail of a (possibly single element) list just prepended to
|
||||
// _list. If, after that prepend, node's follower is NULL, then node is
|
||||
// _list. If, after that prepend, node's follower is null, then node is
|
||||
// also the tail of _list, so record it as such.
|
||||
if (node->next() == NULL) {
|
||||
assert(_tail == NULL, "invariant");
|
||||
if (node->next() == nullptr) {
|
||||
assert(_tail == nullptr, "invariant");
|
||||
_tail = node;
|
||||
}
|
||||
}
|
||||
@ -137,8 +137,8 @@ void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
|
||||
|
||||
void G1RedirtyCardsQueueSet::add_bufferlist(const BufferNodeList& buffers) {
|
||||
assert(_collecting, "precondition");
|
||||
if (buffers._head != NULL) {
|
||||
assert(buffers._tail != NULL, "invariant");
|
||||
if (buffers._head != nullptr) {
|
||||
assert(buffers._tail != nullptr, "invariant");
|
||||
Atomic::add(&_entry_count, buffers._entry_count);
|
||||
_list.prepend(*buffers._head, *buffers._tail);
|
||||
update_tail(buffers._tail);
|
||||
|
@ -41,7 +41,7 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
|
||||
size_t region_granularity,
|
||||
size_t commit_factor,
|
||||
MEMFLAGS type) :
|
||||
_listener(NULL),
|
||||
_listener(nullptr),
|
||||
_storage(rs, used_size, page_size),
|
||||
_region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
|
||||
_memory_type(type) {
|
||||
@ -253,7 +253,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
|
||||
};
|
||||
|
||||
void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
|
||||
if (_listener != NULL) {
|
||||
if (_listener != nullptr) {
|
||||
_listener->on_commit(start_idx, num_regions, zero_filled);
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -69,7 +69,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
|
||||
|
||||
virtual ~G1RegionToSpaceMapper() {}
|
||||
|
||||
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkerThreads* pretouch_workers = NULL) = 0;
|
||||
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkerThreads* pretouch_workers = nullptr) = 0;
|
||||
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
|
||||
|
||||
// Creates an appropriate G1RegionToSpaceMapper for the given parameters.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,7 +27,7 @@
|
||||
#include "gc/g1/g1RegionsOnNodes.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
|
||||
G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(NULL), _numa(G1NUMA::numa()) {
|
||||
G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(nullptr), _numa(G1NUMA::numa()) {
|
||||
_count_per_node = NEW_C_HEAP_ARRAY(uint, _numa->num_active_nodes(), mtGC);
|
||||
clear();
|
||||
}
|
||||
|
@ -257,16 +257,16 @@ private:
|
||||
public:
|
||||
G1RemSetScanState() :
|
||||
_max_reserved_regions(0),
|
||||
_collection_set_iter_state(NULL),
|
||||
_card_table_scan_state(NULL),
|
||||
_collection_set_iter_state(nullptr),
|
||||
_card_table_scan_state(nullptr),
|
||||
_scan_chunks_per_region(G1CollectedHeap::get_chunks_per_region()),
|
||||
_log_scan_chunks_per_region(log2i(_scan_chunks_per_region)),
|
||||
_region_scan_chunks(NULL),
|
||||
_region_scan_chunks(nullptr),
|
||||
_num_total_scan_chunks(0),
|
||||
_scan_chunks_shift(0),
|
||||
_all_dirty_regions(NULL),
|
||||
_next_dirty_regions(NULL),
|
||||
_scan_top(NULL) {
|
||||
_all_dirty_regions(nullptr),
|
||||
_next_dirty_regions(nullptr),
|
||||
_scan_top(nullptr) {
|
||||
}
|
||||
|
||||
~G1RemSetScanState() {
|
||||
@ -277,7 +277,7 @@ public:
|
||||
}
|
||||
|
||||
void initialize(size_t max_reserved_regions) {
|
||||
assert(_collection_set_iter_state == NULL, "Must not be initialized twice");
|
||||
assert(_collection_set_iter_state == nullptr, "Must not be initialized twice");
|
||||
_max_reserved_regions = max_reserved_regions;
|
||||
_collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_reserved_regions, mtGC);
|
||||
_card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
|
||||
@ -327,7 +327,7 @@ public:
|
||||
// as we do not clean up remembered sets before merging heap roots.
|
||||
bool contains_cards_to_process(uint const region_idx) const {
|
||||
HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
|
||||
return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous());
|
||||
return (hr != nullptr && !hr->in_collection_set() && hr->is_old_or_humongous());
|
||||
}
|
||||
|
||||
size_t num_visited_cards() const {
|
||||
@ -368,10 +368,10 @@ public:
|
||||
|
||||
void cleanup() {
|
||||
delete _all_dirty_regions;
|
||||
_all_dirty_regions = NULL;
|
||||
_all_dirty_regions = nullptr;
|
||||
|
||||
delete _next_dirty_regions;
|
||||
_next_dirty_regions = NULL;
|
||||
_next_dirty_regions = nullptr;
|
||||
}
|
||||
|
||||
void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) {
|
||||
@ -452,7 +452,7 @@ public:
|
||||
}
|
||||
|
||||
void clear_scan_top(uint region_idx) {
|
||||
set_scan_top(region_idx, NULL);
|
||||
set_scan_top(region_idx, nullptr);
|
||||
}
|
||||
};
|
||||
|
||||
@ -536,7 +536,7 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure {
|
||||
G1ScanCardClosure card_cl(_g1h, _pss, _heap_roots_found);
|
||||
|
||||
HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl);
|
||||
assert(scanned_to != NULL, "Should be able to scan range");
|
||||
assert(scanned_to != nullptr, "Should be able to scan range");
|
||||
assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end()));
|
||||
|
||||
_pss->trim_queue_partially();
|
||||
@ -669,11 +669,11 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure {
|
||||
|
||||
G1CardTableChunkClaimer claim(_scan_state, region_idx);
|
||||
|
||||
// Set the current scan "finger" to NULL for every heap region to scan. Since
|
||||
// Set the current scan "finger" to null for every heap region to scan. Since
|
||||
// the claim value is monotonically increasing, the check to not scan below this
|
||||
// will filter out objects spanning chunks within the region too then, as opposed
|
||||
// to resetting this value for every claim.
|
||||
_scanned_to = NULL;
|
||||
_scanned_to = nullptr;
|
||||
|
||||
while (claim.has_next()) {
|
||||
_chunks_claimed++;
|
||||
@ -708,7 +708,7 @@ public:
|
||||
_heap_roots_found(0),
|
||||
_rem_set_root_scan_time(),
|
||||
_rem_set_trim_partially_time(),
|
||||
_scanned_to(NULL),
|
||||
_scanned_to(nullptr),
|
||||
_scanned_card_value(remember_already_scanned_cards ? G1CardTable::g1_scanned_card_val()
|
||||
: G1CardTable::clean_card_val()) {
|
||||
}
|
||||
@ -868,7 +868,7 @@ void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss,
|
||||
|
||||
#ifdef ASSERT
|
||||
void G1RemSet::assert_scan_top_is_null(uint hrm_index) {
|
||||
assert(_scan_state->scan_top(hrm_index) == NULL,
|
||||
assert(_scan_state->scan_top(hrm_index) == nullptr,
|
||||
"scan_top of region %u is unexpectedly " PTR_FORMAT,
|
||||
hrm_index, p2i(_scan_state->scan_top(hrm_index)));
|
||||
}
|
||||
@ -880,7 +880,7 @@ void G1RemSet::prepare_region_for_scan(HeapRegion* r) {
|
||||
r->prepare_remset_for_scan();
|
||||
|
||||
// Only update non-collection set old regions, others must have already been set
|
||||
// to NULL (don't scan) in the initialization.
|
||||
// to null (don't scan) in the initialization.
|
||||
if (r->in_collection_set()) {
|
||||
assert_scan_top_is_null(hrm_index);
|
||||
} else if (r->is_old_or_humongous()) {
|
||||
@ -1444,14 +1444,14 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
|
||||
HeapRegion* r = _g1h->heap_region_containing_or_null(start);
|
||||
|
||||
// If this is a (stale) card into an uncommitted region, exit.
|
||||
if (r == NULL) {
|
||||
if (r == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
check_card_ptr(card_ptr, _ct);
|
||||
|
||||
// If the card is no longer dirty, nothing to do.
|
||||
// We cannot load the card value before the "r == NULL" check, because G1
|
||||
// We cannot load the card value before the "r == nullptr" check above, because G1
|
||||
// could uncommit parts of the card table covering uncommitted regions.
|
||||
if (*card_ptr != G1CardTable::dirty_card_val()) {
|
||||
return false;
|
||||
@ -1532,7 +1532,7 @@ void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
|
||||
assert(!dirty_region.is_empty(), "sanity");
|
||||
|
||||
G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
|
||||
if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
|
||||
if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -55,13 +55,13 @@ void G1RemSetSummary::update() {
|
||||
}
|
||||
|
||||
void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) {
|
||||
assert(_rs_threads_vtimes != NULL, "just checking");
|
||||
assert(_rs_threads_vtimes != nullptr, "just checking");
|
||||
assert(thread < _num_vtimes, "just checking");
|
||||
_rs_threads_vtimes[thread] = value;
|
||||
}
|
||||
|
||||
double G1RemSetSummary::rs_thread_vtime(uint thread) const {
|
||||
assert(_rs_threads_vtimes != NULL, "just checking");
|
||||
assert(_rs_threads_vtimes != nullptr, "just checking");
|
||||
assert(thread < _num_vtimes, "just checking");
|
||||
return _rs_threads_vtimes[thread];
|
||||
}
|
||||
@ -82,14 +82,14 @@ G1RemSetSummary::~G1RemSetSummary() {
|
||||
}
|
||||
|
||||
void G1RemSetSummary::set(G1RemSetSummary* other) {
|
||||
assert(other != NULL, "just checking");
|
||||
assert(other != nullptr, "just checking");
|
||||
assert(_num_vtimes == other->_num_vtimes, "just checking");
|
||||
|
||||
memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes);
|
||||
}
|
||||
|
||||
void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
|
||||
assert(other != NULL, "just checking");
|
||||
assert(other != nullptr, "just checking");
|
||||
assert(_num_vtimes == other->_num_vtimes, "just checking");
|
||||
|
||||
for (uint i = 0; i < _num_vtimes; i++) {
|
||||
@ -211,8 +211,8 @@ private:
|
||||
public:
|
||||
HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
|
||||
_free("Free"), _old("Old"), _all("All"),
|
||||
_max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL),
|
||||
_max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL)
|
||||
_max_rs_mem_sz(0), _max_rs_mem_sz_region(nullptr),
|
||||
_max_code_root_mem_sz(0), _max_code_root_mem_sz_region(nullptr)
|
||||
{}
|
||||
|
||||
bool do_heap_region(HeapRegion* r) {
|
||||
@ -234,7 +234,7 @@ public:
|
||||
}
|
||||
size_t code_root_elems = hrrs->code_roots_list_length();
|
||||
|
||||
RegionTypeCounter* current = NULL;
|
||||
RegionTypeCounter* current = nullptr;
|
||||
if (r->is_free()) {
|
||||
current = &_free;
|
||||
} else if (r->is_young()) {
|
||||
@ -255,7 +255,7 @@ public:
|
||||
}
|
||||
|
||||
void print_summary_on(outputStream* out) {
|
||||
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
|
||||
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, nullptr };
|
||||
|
||||
out->print_cr(" Current rem set statistics");
|
||||
out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT
|
||||
@ -263,13 +263,13 @@ public:
|
||||
total_rs_mem_sz(),
|
||||
max_rs_mem_sz(),
|
||||
total_rs_unused_mem_sz());
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) {
|
||||
(*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
|
||||
}
|
||||
|
||||
out->print_cr(" " SIZE_FORMAT " occupied cards represented.",
|
||||
total_cards_occupied());
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) {
|
||||
(*current)->print_cards_occupied_info_on(out, total_cards_occupied());
|
||||
}
|
||||
|
||||
@ -293,13 +293,13 @@ public:
|
||||
proper_unit_for_byte_size(total_code_root_mem_sz()),
|
||||
byte_size_in_proper_unit(max_code_root_rem_set->code_roots_mem_size()),
|
||||
proper_unit_for_byte_size(max_code_root_rem_set->code_roots_mem_size()));
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) {
|
||||
(*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
|
||||
}
|
||||
|
||||
out->print_cr(" " SIZE_FORMAT " code roots represented.",
|
||||
total_code_root_elems());
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) {
|
||||
(*current)->print_code_root_elems_info_on(out, total_code_root_elems());
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,7 +70,7 @@ public:
|
||||
};
|
||||
|
||||
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
|
||||
G1EvacuationRootClosures* res = NULL;
|
||||
G1EvacuationRootClosures* res = nullptr;
|
||||
if (g1h->collector_state()->in_concurrent_start_gc()) {
|
||||
if (ClassUnloadingWithConcurrentMark) {
|
||||
res = new G1ConcurrentStartMarkClosures<false>(g1h, pss);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -87,7 +87,7 @@ public:
|
||||
|
||||
OopClosure* strong_oops() { return _roots; }
|
||||
|
||||
CLDClosure* weak_clds() { return NULL; }
|
||||
CLDClosure* weak_clds() { return nullptr; }
|
||||
CLDClosure* strong_clds() { return _clds; }
|
||||
|
||||
CodeBlobClosure* strong_codeblobs() { return _blobs; }
|
||||
@ -98,8 +98,8 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
|
||||
CodeBlobClosure* blobs) {
|
||||
StrongRootsClosures closures(oops, clds, blobs);
|
||||
|
||||
process_java_roots(&closures, NULL, 0);
|
||||
process_vm_roots(&closures, NULL, 0);
|
||||
process_java_roots(&closures, nullptr, 0);
|
||||
process_vm_roots(&closures, nullptr, 0);
|
||||
|
||||
// CodeCache is already processed in java roots
|
||||
// refProcessor is not needed since we are inside a safe point
|
||||
@ -123,9 +123,9 @@ public:
|
||||
CLDClosure* weak_clds() { return _clds; }
|
||||
CLDClosure* strong_clds() { return _clds; }
|
||||
|
||||
// We don't want to visit code blobs more than once, so we return NULL for the
|
||||
// We don't want to visit code blobs more than once, so we return null for the
|
||||
// strong case and walk the entire code cache as a separate step.
|
||||
CodeBlobClosure* strong_codeblobs() { return NULL; }
|
||||
CodeBlobClosure* strong_codeblobs() { return nullptr; }
|
||||
};
|
||||
|
||||
void G1RootProcessor::process_all_roots(OopClosure* oops,
|
||||
@ -133,10 +133,10 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
|
||||
CodeBlobClosure* blobs) {
|
||||
AllRootsClosures closures(oops, clds);
|
||||
|
||||
process_java_roots(&closures, NULL, 0);
|
||||
process_vm_roots(&closures, NULL, 0);
|
||||
process_java_roots(&closures, nullptr, 0);
|
||||
process_vm_roots(&closures, nullptr, 0);
|
||||
|
||||
process_code_cache_roots(blobs, NULL, 0);
|
||||
process_code_cache_roots(blobs, nullptr, 0);
|
||||
|
||||
// refProcessor is not needed since we are inside a safe point
|
||||
_process_strong_tasks.all_tasks_claimed(G1RP_PS_refProcessor_oops_do);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -50,7 +50,7 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const
|
||||
// requires marking.
|
||||
//
|
||||
// The entry must point into the G1 heap. In particular, it must not
|
||||
// be a NULL pointer. NULL pointers are pre-filtered and never
|
||||
// be a null pointer. null pointers are pre-filtered and never
|
||||
// inserted into a SATB buffer.
|
||||
//
|
||||
// An entry that is below the TAMS pointer for the containing heap
|
||||
@ -81,7 +81,7 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const
|
||||
// in an unfiltered buffer refer to valid objects.
|
||||
|
||||
static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) {
|
||||
// Includes rejection of NULL pointers.
|
||||
// Includes rejection of null pointers.
|
||||
assert(g1h->is_in_reserved(entry),
|
||||
"Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -48,7 +48,7 @@ G1ServiceThread::G1ServiceThread() :
|
||||
|
||||
void G1ServiceThread::register_task(G1ServiceTask* task, jlong delay_ms) {
|
||||
guarantee(!task->is_registered(), "Task already registered");
|
||||
guarantee(task->next() == NULL, "Task already in queue");
|
||||
guarantee(task->next() == nullptr, "Task already in queue");
|
||||
|
||||
// Make sure the service thread is still up and running, there is a race
|
||||
// during shutdown where the service thread has been stopped, but other
|
||||
@ -70,7 +70,7 @@ void G1ServiceThread::register_task(G1ServiceTask* task, jlong delay_ms) {
|
||||
|
||||
void G1ServiceThread::schedule(G1ServiceTask* task, jlong delay_ms, bool notify) {
|
||||
guarantee(task->is_registered(), "Must be registered before scheduled");
|
||||
guarantee(task->next() == NULL, "Task already in queue");
|
||||
guarantee(task->next() == nullptr, "Task already in queue");
|
||||
|
||||
// Schedule task by setting the task time and adding it to queue.
|
||||
jlong delay = TimeHelper::millis_to_counter(delay_ms);
|
||||
@ -114,7 +114,7 @@ G1ServiceTask* G1ServiceThread::wait_for_task() {
|
||||
}
|
||||
}
|
||||
}
|
||||
return nullptr; // Return nullptr when terminating.
|
||||
return nullptr; // Return null when terminating.
|
||||
}
|
||||
|
||||
void G1ServiceThread::run_task(G1ServiceTask* task) {
|
||||
@ -154,15 +154,15 @@ void G1ServiceThread::stop_service() {
|
||||
G1ServiceTask::G1ServiceTask(const char* name) :
|
||||
_time(),
|
||||
_name(name),
|
||||
_next(NULL),
|
||||
_service_thread(NULL) { }
|
||||
_next(nullptr),
|
||||
_service_thread(nullptr) { }
|
||||
|
||||
void G1ServiceTask::set_service_thread(G1ServiceThread* thread) {
|
||||
_service_thread = thread;
|
||||
}
|
||||
|
||||
bool G1ServiceTask::is_registered() {
|
||||
return _service_thread != NULL;
|
||||
return _service_thread != nullptr;
|
||||
}
|
||||
|
||||
void G1ServiceTask::schedule(jlong delay_ms) {
|
||||
@ -177,7 +177,7 @@ const char* G1ServiceTask::name() {
|
||||
}
|
||||
|
||||
void G1ServiceTask::set_time(jlong time) {
|
||||
assert(_next == NULL, "Not allowed to update time while in queue");
|
||||
assert(_next == nullptr, "Not allowed to update time while in queue");
|
||||
_time = time;
|
||||
}
|
||||
|
||||
@ -200,7 +200,7 @@ void G1ServiceTaskQueue::remove_front() {
|
||||
|
||||
G1ServiceTask* task = _sentinel.next();
|
||||
_sentinel.set_next(task->next());
|
||||
task->set_next(NULL);
|
||||
task->set_next(nullptr);
|
||||
}
|
||||
|
||||
G1ServiceTask* G1ServiceTaskQueue::front() {
|
||||
@ -213,8 +213,8 @@ bool G1ServiceTaskQueue::is_empty() {
|
||||
}
|
||||
|
||||
void G1ServiceTaskQueue::add_ordered(G1ServiceTask* task) {
|
||||
assert(task != NULL, "not a valid task");
|
||||
assert(task->next() == NULL, "invariant");
|
||||
assert(task != nullptr, "not a valid task");
|
||||
assert(task->next() == nullptr, "invariant");
|
||||
assert(task->time() != max_jlong, "invalid time for task");
|
||||
|
||||
G1ServiceTask* current = &_sentinel;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -111,7 +111,7 @@ class G1ServiceThread: public ConcurrentGCThread {
|
||||
void stop_service();
|
||||
|
||||
// Return the next ready task, waiting until a task is ready.
|
||||
// Instead returns nullptr if termination requested.
|
||||
// Instead returns null if termination requested.
|
||||
G1ServiceTask* wait_for_task();
|
||||
|
||||
void run_task(G1ServiceTask* task);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,9 +32,9 @@
|
||||
|
||||
G1SurvRateGroup::G1SurvRateGroup() :
|
||||
_stats_arrays_length(0),
|
||||
_accum_surv_rate_pred(NULL),
|
||||
_accum_surv_rate_pred(nullptr),
|
||||
_last_pred(0.0),
|
||||
_surv_rate_predictors(NULL),
|
||||
_surv_rate_predictors(nullptr),
|
||||
_num_added_regions(0) {
|
||||
reset();
|
||||
start_adding_regions();
|
||||
@ -57,7 +57,7 @@ void G1SurvRateGroup::reset() {
|
||||
|
||||
// Seed initial _surv_rate_pred and _accum_surv_rate_pred values
|
||||
guarantee(_stats_arrays_length == 1, "invariant" );
|
||||
guarantee(_surv_rate_predictors[0] != NULL, "invariant" );
|
||||
guarantee(_surv_rate_predictors[0] != nullptr, "invariant" );
|
||||
const double initial_surv_rate = 0.4;
|
||||
_surv_rate_predictors[0]->add(initial_surv_rate);
|
||||
_last_pred = _accum_surv_rate_pred[0] = initial_surv_rate;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,7 +29,7 @@
|
||||
#include "runtime/globals.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
|
||||
G1UncommitRegionTask* G1UncommitRegionTask::_instance = NULL;
|
||||
G1UncommitRegionTask* G1UncommitRegionTask::_instance = nullptr;
|
||||
|
||||
G1UncommitRegionTask::G1UncommitRegionTask() :
|
||||
G1ServiceTask("G1 Uncommit Region Task"),
|
||||
@ -38,7 +38,7 @@ G1UncommitRegionTask::G1UncommitRegionTask() :
|
||||
_summary_region_count(0) { }
|
||||
|
||||
void G1UncommitRegionTask::initialize() {
|
||||
assert(_instance == NULL, "Already initialized");
|
||||
assert(_instance == nullptr, "Already initialized");
|
||||
_instance = new G1UncommitRegionTask();
|
||||
|
||||
// Register the task with the service thread. This will automatically
|
||||
@ -48,7 +48,7 @@ void G1UncommitRegionTask::initialize() {
|
||||
}
|
||||
|
||||
G1UncommitRegionTask* G1UncommitRegionTask::instance() {
|
||||
if (_instance == NULL) {
|
||||
if (_instance == nullptr) {
|
||||
initialize();
|
||||
}
|
||||
return _instance;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -129,7 +129,7 @@ void VM_G1CollectForAllocation::doit() {
|
||||
// An allocation has been requested. So, try to do that first.
|
||||
_result = g1h->attempt_allocation_at_safepoint(_word_size,
|
||||
false /* expect_null_cur_alloc_region */);
|
||||
if (_result != NULL) {
|
||||
if (_result != nullptr) {
|
||||
// If we can successfully allocate before we actually do the
|
||||
// pause then we will consider this pause successful.
|
||||
_gc_succeeded = true;
|
||||
|
@ -95,7 +95,7 @@ public:
|
||||
_pause_cause(cause),
|
||||
// Fake a "no cause" and manually add the correct string in update_young_gc_name()
|
||||
// to make the string look more natural.
|
||||
_tt(update_young_gc_name(), NULL, GCCause::_no_gc, true) {
|
||||
_tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
|
||||
}
|
||||
|
||||
~G1YoungGCTraceTime() {
|
||||
@ -793,7 +793,7 @@ public:
|
||||
void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
|
||||
void do_oop(oop* p) {
|
||||
oop obj = *p;
|
||||
assert(obj != NULL, "the caller should have filtered out NULL values");
|
||||
assert(obj != nullptr, "the caller should have filtered out null values");
|
||||
|
||||
const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
|
||||
if (!region_attr.is_in_cset_or_humongous_candidate()) {
|
||||
|
@ -110,9 +110,9 @@ void HeapRegion::handle_evacuation_failure() {
|
||||
}
|
||||
|
||||
void HeapRegion::unlink_from_list() {
|
||||
set_next(NULL);
|
||||
set_prev(NULL);
|
||||
set_containing_set(NULL);
|
||||
set_next(nullptr);
|
||||
set_prev(nullptr);
|
||||
set_containing_set(nullptr);
|
||||
}
|
||||
|
||||
void HeapRegion::hr_clear(bool clear_space) {
|
||||
@ -204,7 +204,7 @@ void HeapRegion::clear_humongous() {
|
||||
assert(is_humongous(), "pre-condition");
|
||||
|
||||
assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
|
||||
_humongous_start_region = NULL;
|
||||
_humongous_start_region = nullptr;
|
||||
}
|
||||
|
||||
void HeapRegion::prepare_remset_for_scan() {
|
||||
@ -217,23 +217,23 @@ HeapRegion::HeapRegion(uint hrm_index,
|
||||
G1CardSetConfiguration* config) :
|
||||
_bottom(mr.start()),
|
||||
_end(mr.end()),
|
||||
_top(NULL),
|
||||
_top(nullptr),
|
||||
_bot_part(bot, this),
|
||||
_pre_dummy_top(NULL),
|
||||
_rem_set(NULL),
|
||||
_pre_dummy_top(nullptr),
|
||||
_rem_set(nullptr),
|
||||
_hrm_index(hrm_index),
|
||||
_type(),
|
||||
_humongous_start_region(NULL),
|
||||
_humongous_start_region(nullptr),
|
||||
_index_in_opt_cset(InvalidCSetIndex),
|
||||
_next(NULL), _prev(NULL),
|
||||
_next(nullptr), _prev(nullptr),
|
||||
#ifdef ASSERT
|
||||
_containing_set(NULL),
|
||||
_containing_set(nullptr),
|
||||
#endif
|
||||
_top_at_mark_start(NULL),
|
||||
_parsable_bottom(NULL),
|
||||
_top_at_mark_start(nullptr),
|
||||
_parsable_bottom(nullptr),
|
||||
_garbage_bytes(0),
|
||||
_young_index_in_cset(-1),
|
||||
_surv_rate_group(NULL), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
|
||||
_surv_rate_group(nullptr), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
|
||||
_node_index(G1NUMA::UnknownNodeIndex)
|
||||
{
|
||||
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
|
||||
@ -354,8 +354,8 @@ public:
|
||||
_hr(hr), _failures(false) {}
|
||||
|
||||
void do_code_blob(CodeBlob* cb) {
|
||||
nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
|
||||
if (nm != NULL) {
|
||||
nmethod* nm = (cb == nullptr) ? nullptr : cb->as_compiled_method()->as_nmethod_or_null();
|
||||
if (nm != nullptr) {
|
||||
// Verify that the nemthod is live
|
||||
VerifyCodeRootOopClosure oop_cl(_hr);
|
||||
nm->oops_do(&oop_cl);
|
||||
|
@ -97,8 +97,8 @@ public:
|
||||
assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
|
||||
_pre_dummy_top = pre_dummy_top;
|
||||
}
|
||||
HeapWord* pre_dummy_top() const { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
|
||||
void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
|
||||
HeapWord* pre_dummy_top() const { return (_pre_dummy_top == nullptr) ? top() : _pre_dummy_top; }
|
||||
void reset_pre_dummy_top() { _pre_dummy_top = nullptr; }
|
||||
|
||||
// Returns true iff the given the heap region contains the
|
||||
// given address as part of an allocated object. This may
|
||||
@ -128,13 +128,13 @@ private:
|
||||
void mangle_unused_area() PRODUCT_RETURN;
|
||||
|
||||
// Try to allocate at least min_word_size and up to desired_size from this region.
|
||||
// Returns NULL if not possible, otherwise sets actual_word_size to the amount of
|
||||
// Returns null if not possible, otherwise sets actual_word_size to the amount of
|
||||
// space allocated.
|
||||
// This version assumes that all allocation requests to this HeapRegion are properly
|
||||
// synchronized.
|
||||
inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
|
||||
// Try to allocate at least min_word_size and up to desired_size from this HeapRegion.
|
||||
// Returns NULL if not possible, otherwise sets actual_word_size to the amount of
|
||||
// Returns null if not possible, otherwise sets actual_word_size to the amount of
|
||||
// space allocated.
|
||||
// This version synchronizes with other calls to par_allocate_impl().
|
||||
inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
|
||||
@ -269,7 +269,7 @@ private:
|
||||
// object and apply the given closure to them.
|
||||
// Humongous objects are allocated directly in the old-gen. So we need special
|
||||
// handling for concurrent processing encountering an in-progress allocation.
|
||||
// Returns the address after the last actually scanned or NULL if the area could
|
||||
// Returns the address after the last actually scanned or null if the area could
|
||||
// not be scanned (That should only happen when invoked concurrently with the
|
||||
// mutator).
|
||||
template <class Closure, bool in_gc_pause>
|
||||
@ -312,8 +312,8 @@ public:
|
||||
// Returns whether a field is in the same region as the obj it points to.
|
||||
template <typename T>
|
||||
static bool is_in_same_region(T* p, oop obj) {
|
||||
assert(p != NULL, "p can't be NULL");
|
||||
assert(obj != NULL, "obj can't be NULL");
|
||||
assert(p != nullptr, "p can't be null");
|
||||
assert(obj != nullptr, "obj can't be null");
|
||||
return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
|
||||
}
|
||||
|
||||
@ -464,8 +464,8 @@ public:
|
||||
// available in non-product builds.
|
||||
#ifdef ASSERT
|
||||
void set_containing_set(HeapRegionSetBase* containing_set) {
|
||||
assert((containing_set != NULL && _containing_set == NULL) ||
|
||||
containing_set == NULL,
|
||||
assert((containing_set != nullptr && _containing_set == nullptr) ||
|
||||
containing_set == nullptr,
|
||||
"containing_set: " PTR_FORMAT " "
|
||||
"_containing_set: " PTR_FORMAT,
|
||||
p2i(containing_set), p2i(_containing_set));
|
||||
@ -546,7 +546,7 @@ public:
|
||||
// mr must not be empty. Must be trimmed to the allocated/parseable space in this region.
|
||||
// This region must be old or humongous.
|
||||
// Returns the next unscanned address if the designated objects were successfully
|
||||
// processed, NULL if an unparseable part of the heap was encountered (That should
|
||||
// processed, null if an unparseable part of the heap was encountered (That should
|
||||
// only happen when invoked concurrently with the mutator).
|
||||
template <bool in_gc_pause, class Closure>
|
||||
inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
|
||||
|
@ -54,7 +54,7 @@ inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
|
||||
*actual_size = want_to_allocate;
|
||||
return obj;
|
||||
} else {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
@ -77,7 +77,7 @@ inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
|
||||
return obj;
|
||||
}
|
||||
} else {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
} while (true);
|
||||
}
|
||||
@ -335,14 +335,14 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
|
||||
HeapRegion* sr = humongous_start_region();
|
||||
oop obj = cast_to_oop(sr->bottom());
|
||||
|
||||
// If concurrent and klass_or_null is NULL, then space has been
|
||||
// If concurrent and klass_or_null is null, then space has been
|
||||
// allocated but the object has not yet been published by setting
|
||||
// the klass. That can only happen if the card is stale. However,
|
||||
// we've already set the card clean, so we must return failure,
|
||||
// since the allocating thread could have performed a write to the
|
||||
// card that might be missed otherwise.
|
||||
if (!in_gc_pause && (obj->klass_or_null_acquire() == NULL)) {
|
||||
return NULL;
|
||||
if (!in_gc_pause && (obj->klass_or_null_acquire() == nullptr)) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// We have a well-formed humongous object at the start of sr.
|
||||
@ -491,7 +491,7 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
|
||||
assert(is_old(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());
|
||||
|
||||
// Because mr has been trimmed to what's been allocated in this
|
||||
// region, the objects in these parts of the heap have non-NULL
|
||||
// region, the objects in these parts of the heap have non-null
|
||||
// klass pointers. There's no need to use klass_or_null to detect
|
||||
// in-progress allocation.
|
||||
// We might be in the progress of scrubbing this region and in this
|
||||
@ -512,7 +512,7 @@ inline bool HeapRegion::has_valid_age_in_surv_rate() const {
|
||||
}
|
||||
|
||||
inline bool HeapRegion::has_surv_rate_group() const {
|
||||
return _surv_rate_group != NULL;
|
||||
return _surv_rate_group != nullptr;
|
||||
}
|
||||
|
||||
inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const {
|
||||
@ -521,7 +521,7 @@ inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) c
|
||||
}
|
||||
|
||||
inline void HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group) {
|
||||
assert(surv_rate_group != NULL, "pre-condition");
|
||||
assert(surv_rate_group != nullptr, "pre-condition");
|
||||
assert(!has_surv_rate_group(), "pre-condition");
|
||||
assert(is_young(), "pre-condition");
|
||||
|
||||
@ -534,7 +534,7 @@ inline void HeapRegion::uninstall_surv_rate_group() {
|
||||
assert(has_valid_age_in_surv_rate(), "pre-condition");
|
||||
assert(is_young(), "pre-condition");
|
||||
|
||||
_surv_rate_group = NULL;
|
||||
_surv_rate_group = nullptr;
|
||||
_age_index = G1SurvRateGroup::InvalidAgeIndex;
|
||||
} else {
|
||||
assert(!has_valid_age_in_surv_rate(), "pre-condition");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -62,12 +62,12 @@ public:
|
||||
};
|
||||
|
||||
HeapRegionManager::HeapRegionManager() :
|
||||
_bot_mapper(NULL),
|
||||
_cardtable_mapper(NULL),
|
||||
_bot_mapper(nullptr),
|
||||
_cardtable_mapper(nullptr),
|
||||
_committed_map(),
|
||||
_allocated_heapregions_length(0),
|
||||
_regions(), _heap_mapper(NULL),
|
||||
_bitmap_mapper(NULL),
|
||||
_regions(), _heap_mapper(nullptr),
|
||||
_bitmap_mapper(nullptr),
|
||||
_free_list("Free list", new MasterFreeRegionListChecker())
|
||||
{ }
|
||||
|
@ -90,7 +90,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
}

HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
HeapRegion* hr = NULL;
HeapRegion* hr = nullptr;
bool from_head = !type.is_young();
G1NUMA* numa = G1NUMA::numa();

@ -99,14 +99,14 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re
hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
}

if (hr == NULL) {
if (hr == nullptr) {
// If there's a single active node or we did not get a region from our requested node,
// try without requested node index.
hr = _free_list.remove_region(from_head);
}

if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
if (hr != nullptr) {
assert(hr->next() == nullptr, "Single region should not have next");
assert(is_available(hr->hrm_index()), "Must be committed");

if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
@ -120,7 +120,7 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re
HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
uint candidate = find_contiguous_in_free_list(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return NULL;
return nullptr;
}
return allocate_free_regions_starting_at(candidate, num_regions);
}
@ -128,7 +128,7 @@ HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_region
HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
uint candidate = find_contiguous_allow_expand(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return NULL;
return nullptr;
}
expand_exact(candidate, num_regions, G1CollectedHeap::heap()->workers());
return allocate_free_regions_starting_at(candidate, num_regions);
@ -164,7 +164,7 @@ void HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pret
commit_regions(start, num_regions, pretouch_workers);
for (uint i = start; i < start + num_regions; i++) {
HeapRegion* hr = _regions.get_by_index(i);
if (hr == NULL) {
if (hr == nullptr) {
hr = new_heap_region(i);
OrderAccess::storestore();
_regions.set_by_index(i, hr);
@ -198,7 +198,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
for (uint i = start; i < end; i++) {
// Can't use at() here since region is no longer marked available.
HeapRegion* hr = _regions.get_by_index(i);
assert(hr != NULL, "Region should still be present");
assert(hr != nullptr, "Region should still be present");
printer->uncommit(hr);
}
}
@ -416,7 +416,7 @@ uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
return 0;
}

expand_exact(expand_candidate, 1, NULL);
expand_exact(expand_candidate, 1, nullptr);
return 1;
}

@ -487,7 +487,7 @@ uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
guarantee(r != NULL, "Start region must be a valid region");
guarantee(r != nullptr, "Start region must be a valid region");
guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
HeapRegion* hr = _regions.get_by_index(i);
@ -495,7 +495,7 @@ HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
return hr;
}
}
return NULL;
return nullptr;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
@ -505,7 +505,7 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
if (!is_available(i)) {
continue;
}
guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
guarantee(at(i) != nullptr, "Tried to access region %u that has a null HeapRegion*", i);
bool res = blk->do_heap_region(at(i));
if (res) {
blk->set_incomplete();
@ -535,9 +535,9 @@ uint HeapRegionManager::find_highest_free(bool* expanded) {
// committed, expand at that index.
for (uint curr = reserved_length(); curr-- > 0;) {
HeapRegion *hr = _regions.get_by_index(curr);
if (hr == NULL || !is_available(curr)) {
if (hr == nullptr || !is_available(curr)) {
// Found uncommitted and free region, expand to make it available for use.
expand_exact(curr, 1, NULL);
expand_exact(curr, 1, nullptr);
assert(at(curr)->is_free(), "Region (%u) must be available and free after expand", curr);

*expanded = true;
@ -647,7 +647,7 @@ void HeapRegionManager::shrink_at(uint index, size_t num_regions) {

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx < _allocated_heapregions_length, "checking");
guarantee(res_idx != NULL, "checking");
guarantee(res_idx != nullptr, "checking");

uint num_regions_found = 0;

@ -695,7 +695,7 @@ void HeapRegionManager::verify() {
}
num_committed++;
HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, "invariant: i: %u", i);
guarantee(hr != nullptr, "invariant: i: %u", i);
guarantee(!prev_committed || hr->bottom() == prev_end,
"invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
@ -711,7 +711,7 @@ void HeapRegionManager::verify() {
prev_end = hr->end();
}
for (uint i = _allocated_heapregions_length; i < reserved_length(); i++) {
guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
guarantee(_regions.get_by_index(i) == nullptr, "invariant i: %u", i);
}

guarantee(num_committed == length(), "Found %u committed regions, but should be %u", num_committed, length());
@ -725,7 +725,7 @@ void HeapRegionManager::verify_optional() {
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(nullptr) {
uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
_claims = new_claims;
@ -798,7 +798,7 @@ public:
FreeRegionList *free_list = worker_freelist(worker_id);
for (uint i = start; i < end; i++) {
HeapRegion *region = _hrm->at_or_null(i);
if (region != NULL && region->is_free()) {
if (region != nullptr && region->is_free()) {
// Need to clear old links to allow to be added to new freelist.
region->unlink_from_list();
free_list->add_to_tail(region);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ class WorkerThreads;

class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
protected:
virtual HeapRegion* default_value() const { return NULL; }
virtual HeapRegion* default_value() const { return nullptr; }
};

// This class keeps track of the actual heap memory, auxiliary data
@ -88,7 +88,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
HeapWord* heap_end() const {return _regions.end_address_mapped(); }

// Pass down commit calls to the VirtualSpace.
void commit_regions(uint index, size_t num_regions = 1, WorkerThreads* pretouch_workers = NULL);
void commit_regions(uint index, size_t num_regions = 1, WorkerThreads* pretouch_workers = nullptr);

// Initialize the HeapRegions in the range and put them on the free list.
void initialize_regions(uint start, uint num_regions);
@ -125,7 +125,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
G1RegionToSpaceMapper* _bitmap_mapper;
FreeRegionList _free_list;

void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = NULL);
void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = nullptr);

// G1RegionCommittedMap helpers. These functions do the work that comes with
// the state changes tracked by G1CommittedRegionMap. To make sure this is
@ -174,7 +174,7 @@ public:
// is valid.
inline HeapRegion* at(uint index) const;

// Return the HeapRegion at the given index, NULL if the index
// Return the HeapRegion at the given index, null if the index
// is for an unavailable region.
inline HeapRegion* at_or_null(uint index) const;

@ -186,7 +186,7 @@ public:
inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

// If addr is within the committed space return its corresponding
// HeapRegion, otherwise return NULL.
// HeapRegion, otherwise return null.
inline HeapRegion* addr_to_region(HeapWord* addr) const;

// Insert the given region into the free region list.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,17 +46,17 @@ inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
inline HeapRegion* HeapRegionManager::at(uint index) const {
assert(is_available(index), "pre-condition");
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr != nullptr, "sanity");
assert(hr->hrm_index() == index, "sanity");
return hr;
}

inline HeapRegion* HeapRegionManager::at_or_null(uint index) const {
if (!is_available(index)) {
return NULL;
return nullptr;
}
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "All available regions must have a HeapRegion but index %u has not.", index);
assert(hr != nullptr, "All available regions must have a HeapRegion but index %u has not.", index);
assert(hr->hrm_index() == index, "sanity");
return hr;
}
@ -69,7 +69,7 @@ inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) c
if (index < reserved_length() && is_available(index) && at(index)->is_continues_humongous()) {
return at(index);
} else {
return NULL;
return nullptr;
}
}

@ -103,7 +103,7 @@ void HeapRegionRemSet::print_static_mem_size(outputStream* out) {
// When not at safepoint the CodeCache_lock must be held during modifications.

void HeapRegionRemSet::add_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
assert(nm != nullptr, "sanity");
assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
"should call add_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
@ -113,7 +113,7 @@ void HeapRegionRemSet::add_code_root(nmethod* nm) {
}

void HeapRegionRemSet::add_code_root_locked(nmethod* nm) {
assert(nm != NULL, "sanity");
assert(nm != nullptr, "sanity");
assert((CodeCache_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() &&
(_m.owned_by_self() || Thread::current()->is_VM_thread()))),
@ -127,10 +127,10 @@ void HeapRegionRemSet::add_code_root_locked(nmethod* nm) {
}

void HeapRegionRemSet::remove_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
assert(nm != nullptr, "sanity");
assert_locked_or_safepoint(CodeCache_lock);

MutexLocker ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
MutexLocker ml(CodeCache_lock->owned_by_self() ? nullptr : &_m, Mutex::_no_safepoint_check_flag);
_code_roots.remove(nm);

// Check that there were no duplicates

@ -34,7 +34,7 @@ uint FreeRegionList::_unrealistically_long_length = 0;
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index());
assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions
assert(_checker == NULL || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
assert(_checker == nullptr || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
hr->hrm_index(), hr->get_type_str(), name());
assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name());
assert(!hr->is_empty() || hr->is_free(), "Empty region %u is not free or old for set %s", hr->hrm_index(), name());
@ -100,13 +100,13 @@ void FreeRegionList::remove_all() {
verify_optional();

HeapRegion* curr = _head;
while (curr != NULL) {
while (curr != nullptr) {
verify_region(curr);

HeapRegion* next = curr->next();
curr->set_next(NULL);
curr->set_prev(NULL);
curr->set_containing_set(NULL);
curr->set_next(nullptr);
curr->set_prev(nullptr);
curr->set_containing_set(nullptr);

decrease_length(curr->node_index());

@ -127,7 +127,7 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
return;
}

if (_node_info != NULL && from_list->_node_info != NULL) {
if (_node_info != nullptr && from_list->_node_info != nullptr) {
_node_info->add(from_list->_node_info);
}

@ -136,9 +136,9 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from NULL to non-NULL or vice versa to catch bugs. So, we have
// to NULL it first before setting it to the value.
hr->set_containing_set(NULL);
// from null to non-null or vice versa to catch bugs. So, we have
// to null it first before setting it to the value.
hr->set_containing_set(nullptr);
hr->set_containing_set(this);
}
#endif // ASSERT
@ -161,7 +161,7 @@ void FreeRegionList::append_ordered(FreeRegionList* from_list) {

if (is_empty()) {
// Make from_list the current list.
assert_free_region_list(length() == 0 && _tail == NULL, "invariant");
assert_free_region_list(length() == 0 && _tail == nullptr, "invariant");
_head = from_list->_head;
_tail = from_list->_tail;
} else {
@ -185,29 +185,29 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}

if (is_empty()) {
assert_free_region_list(length() == 0 && _tail == NULL, "invariant");
assert_free_region_list(length() == 0 && _tail == nullptr, "invariant");
_head = from_list->_head;
_tail = from_list->_tail;
} else {
HeapRegion* curr_to = _head;
HeapRegion* curr_from = from_list->_head;

while (curr_from != NULL) {
while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) {
while (curr_from != nullptr) {
while (curr_to != nullptr && curr_to->hrm_index() < curr_from->hrm_index()) {
curr_to = curr_to->next();
}

if (curr_to == NULL) {
if (curr_to == nullptr) {
// The rest of the from list should be added as tail
_tail->set_next(curr_from);
curr_from->set_prev(_tail);
curr_from = NULL;
curr_from = nullptr;
} else {
HeapRegion* next_from = curr_from->next();

curr_from->set_next(curr_to);
curr_from->set_prev(curr_to->prev());
if (curr_to->prev() == NULL) {
if (curr_to->prev() == nullptr) {
_head = curr_from;
} else {
curr_to->prev()->set_next(curr_from);
@ -229,14 +229,14 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
#ifdef ASSERT
void FreeRegionList::verify_region_to_remove(HeapRegion* curr, HeapRegion* next) {
assert_free_region_list(_head != next, "invariant");
if (next != NULL) {
if (next != nullptr) {
assert_free_region_list(next->prev() == curr, "invariant");
assert_free_region_list(_tail != curr, "invariant");
} else {
assert_free_region_list(_tail == curr, "invariant");
}
HeapRegion* prev = curr->prev();
if (prev == NULL) {
if (prev == nullptr) {
assert_free_region_list(_head == curr, "invariant");
} else {
assert_free_region_list(_head != curr, "invariant");
@ -268,11 +268,11 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
verify_region_to_remove(curr, next);

if (_last == curr) {
_last = NULL;
_last = nullptr;
}

curr->set_next(NULL);
curr->set_prev(NULL);
curr->set_next(nullptr);
curr->set_prev(nullptr);
remove(curr);

count++;
@ -282,12 +282,12 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
curr = next;
}

if (prev == NULL) {
if (prev == nullptr) {
_head = next;
} else {
prev->set_next(next);
}
if (next == NULL) {
if (next == nullptr) {
_tail = prev;
} else {
next->set_prev(prev);
@ -319,25 +319,25 @@ void FreeRegionList::verify() {

void FreeRegionList::clear() {
_length = 0;
_head = NULL;
_tail = NULL;
_last = NULL;
_head = nullptr;
_tail = nullptr;
_last = nullptr;

if (_node_info!= NULL) {
if (_node_info!= nullptr) {
_node_info->clear();
}
}

void FreeRegionList::verify_list() {
HeapRegion* curr = _head;
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
HeapRegion* prev1 = nullptr;
HeapRegion* prev0 = nullptr;
uint count = 0;
size_t capacity = 0;
uint last_index = 0;

guarantee(_head == NULL || _head->prev() == NULL, "_head should not have a prev");
while (curr != NULL) {
guarantee(_head == nullptr || _head->prev() == nullptr, "_head should not have a prev");
while (curr != nullptr) {
verify_region(curr);

count++;
@ -345,7 +345,7 @@ void FreeRegionList::verify_list() {
"[%s] the calculated length: %u seems very long, is there maybe a cycle? curr: " PTR_FORMAT " prev0: " PTR_FORMAT " " "prev1: " PTR_FORMAT " length: %u",
name(), count, p2i(curr), p2i(prev0), p2i(prev1), length());

if (curr->next() != NULL) {
if (curr->next() != nullptr) {
guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
}
guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted");
@ -359,25 +359,25 @@ void FreeRegionList::verify_list() {
}

guarantee(_tail == prev0, "Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index());
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(_tail == nullptr || _tail->next() == nullptr, "_tail should not have a next");
guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
}


FreeRegionList::FreeRegionList(const char* name, HeapRegionSetChecker* checker):
HeapRegionSetBase(name, checker),
_node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : NULL) {
_node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : nullptr) {

clear();
}

FreeRegionList::~FreeRegionList() {
if (_node_info != NULL) {
if (_node_info != nullptr) {
delete _node_info;
}
}

FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(NULL),
FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(nullptr),
_num_nodes(_numa->num_active_nodes()) {
assert(UseNUMA, "Invariant");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,7 +82,7 @@ protected:
void verify_region(HeapRegion* hr) PRODUCT_RETURN;

void check_mt_safety() {
if (_checker != NULL) {
if (_checker != nullptr) {
_checker->check_mt_safety();
}
}
@ -190,7 +190,7 @@ protected:
virtual void clear();

public:
FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL);
FreeRegionList(const char* name, HeapRegionSetChecker* checker = nullptr);
~FreeRegionList();

void verify_list();
@ -249,7 +249,7 @@ private:

public:
bool more_available() {
return _curr != NULL;
return _curr != nullptr;
}

HeapRegion* get_next() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,9 +31,9 @@

inline void HeapRegionSetBase::add(HeapRegion* hr) {
check_mt_safety();
assert_heap_region_set(hr->containing_set() == NULL, "should not already have a containing set");
assert_heap_region_set(hr->next() == NULL, "should not already be linked");
assert_heap_region_set(hr->prev() == NULL, "should not already be linked");
assert_heap_region_set(hr->containing_set() == nullptr, "should not already have a containing set");
assert_heap_region_set(hr->next() == nullptr, "should not already be linked");
assert_heap_region_set(hr->prev() == nullptr, "should not already be linked");

_length++;
hr->set_containing_set(this);
@ -43,23 +43,23 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) {
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
assert_heap_region_set(hr->next() == NULL, "should already be unlinked");
assert_heap_region_set(hr->prev() == NULL, "should already be unlinked");
assert_heap_region_set(hr->next() == nullptr, "should already be unlinked");
assert_heap_region_set(hr->prev() == nullptr, "should already be unlinked");

hr->set_containing_set(NULL);
hr->set_containing_set(nullptr);
assert_heap_region_set(_length > 0, "pre-condition");
_length--;
}

inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) {
assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL && _tail->hrm_index() < region_to_add->hrm_index()),
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr && _tail->hrm_index() < region_to_add->hrm_index()),
"invariant");
// add() will verify the region and check mt safety.
add(region_to_add);

if (_head != NULL) {
// Link into list, next is already NULL, no need to set.
if (_head != nullptr) {
// Link into list, next is already null, no need to set.
region_to_add->set_prev(_tail);
_tail->set_next(region_to_add);
_tail = region_to_add;
@ -72,37 +72,37 @@ inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) {
}

inline void FreeRegionList::add_ordered(HeapRegion* hr) {
assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr),
"invariant");
// add() will verify the region and check mt safety.
add(hr);

// Now link the region
if (_head != NULL) {
if (_head != nullptr) {
HeapRegion* curr;

if (_last != NULL && _last->hrm_index() < hr->hrm_index()) {
if (_last != nullptr && _last->hrm_index() < hr->hrm_index()) {
curr = _last;
} else {
curr = _head;
}

// Find first entry with a Region Index larger than entry to insert.
while (curr != NULL && curr->hrm_index() < hr->hrm_index()) {
while (curr != nullptr && curr->hrm_index() < hr->hrm_index()) {
curr = curr->next();
}

hr->set_next(curr);

if (curr == NULL) {
if (curr == nullptr) {
// Adding at the end
hr->set_prev(_tail);
_tail->set_next(hr);
_tail = hr;
} else if (curr->prev() == NULL) {
} else if (curr->prev() == nullptr) {
// Adding at the beginning
hr->set_prev(NULL);
hr->set_prev(nullptr);
_head = hr;
curr->set_prev(hr);
} else {
@ -123,12 +123,12 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
inline HeapRegion* FreeRegionList::remove_from_head_impl() {
HeapRegion* result = _head;
_head = result->next();
if (_head == NULL) {
_tail = NULL;
if (_head == nullptr) {
_tail = nullptr;
} else {
_head->set_prev(NULL);
_head->set_prev(nullptr);
}
result->set_next(NULL);
result->set_next(nullptr);
return result;
}

@ -136,12 +136,12 @@ inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
HeapRegion* result = _tail;

_tail = result->prev();
if (_tail == NULL) {
_head = NULL;
if (_tail == nullptr) {
_head = nullptr;
} else {
_tail->set_next(NULL);
_tail->set_next(nullptr);
}
result->set_prev(NULL);
result->set_prev(nullptr);
return result;
}

@ -150,9 +150,9 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
verify_optional();

if (is_empty()) {
return NULL;
return nullptr;
}
assert_free_region_list(length() > 0 && _head != NULL && _tail != NULL, "invariant");
assert_free_region_list(length() > 0 && _head != nullptr && _tail != nullptr, "invariant");

HeapRegion* hr;

@ -163,7 +163,7 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
}

if (_last == hr) {
_last = NULL;
_last = nullptr;
}

// remove() will verify the region and check mt safety.
@ -185,7 +185,7 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
size_t cur_depth = 0;
if (from_head) {
for (cur = _head;
cur != NULL && cur_depth < max_search_depth;
cur != nullptr && cur_depth < max_search_depth;
cur = cur->next(), ++cur_depth) {
if (requested_node_index == cur->node_index()) {
break;
@ -193,7 +193,7 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
}
} else {
for (cur = _tail;
cur != NULL && cur_depth < max_search_depth;
cur != nullptr && cur_depth < max_search_depth;
cur = cur->prev(), ++cur_depth) {
if (requested_node_index == cur->node_index()) {
break;
@ -202,28 +202,28 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
}

// Didn't find a region to use.
if (cur == NULL || cur_depth >= max_search_depth) {
return NULL;
if (cur == nullptr || cur_depth >= max_search_depth) {
return nullptr;
}

// Splice the region out of the list.
HeapRegion* prev = cur->prev();
HeapRegion* next = cur->next();
if (prev == NULL) {
if (prev == nullptr) {
_head = next;
} else {
prev->set_next(next);
}
if (next == NULL) {
if (next == nullptr) {
_tail = prev;
} else {
next->set_prev(prev);
}
cur->set_prev(NULL);
cur->set_next(NULL);
cur->set_prev(nullptr);
cur->set_next(nullptr);

if (_last == cur) {
_last = NULL;
_last = nullptr;
}

remove(cur);
@ -252,19 +252,19 @@ inline uint FreeRegionList::NodeInfo::length(uint node_index) const {
}

inline void FreeRegionList::increase_length(uint node_index) {
if (_node_info != NULL) {
if (_node_info != nullptr) {
return _node_info->increase_length(node_index);
}
}

inline void FreeRegionList::decrease_length(uint node_index) {
if (_node_info != NULL) {
if (_node_info != nullptr) {
return _node_info->decrease_length(node_index);
}
}

inline uint FreeRegionList::length(uint node_index) const {
if (_node_info != NULL) {
if (_node_info != nullptr) {
return _node_info->length(node_index);
} else {
return 0;

@ -56,7 +56,7 @@ const char* HeapRegionType::get_str() const {
case OldTag: return "OLD";
default:
ShouldNotReachHere();
return NULL; // keep some compilers happy
return nullptr; // keep some compilers happy
}
}

@ -71,7 +71,7 @@ const char* HeapRegionType::get_short_str() const {
case OldTag: return "O";
default:
ShouldNotReachHere();
return NULL; // keep some compilers happy
return nullptr; // keep some compilers happy
}
}