8301494: Replace NULL with nullptr in cpu/arm

Reviewed-by: dholmes, coleenp
Author: Johan Sjölen 2023-02-17 11:24:41 +00:00
parent 4f1cffd52c
commit c4ffe4bf63
38 changed files with 314 additions and 314 deletions
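
For background: in HotSpot, NULL is the classic integer-literal macro, while C++11 nullptr has its own type (std::nullptr_t), so it cannot be silently treated as an integer in overload resolution or template deduction. A minimal standalone sketch of the pitfall this conversion avoids (the sketch and its names are illustrative, not code from this patch):

    // Illustrative sketch, not from the JDK sources: NULL can bind to an
    // integer overload; nullptr always binds to a pointer overload.
    #include <cstddef>
    #include <iostream>

    static void report(int)         { std::cout << "int overload\n"; }
    static void report(const char*) { std::cout << "pointer overload\n"; }

    int main() {
      // report(NULL);   // int overload or ambiguous, depending on how NULL is defined
      report(nullptr);   // always the pointer overload
      return 0;
    }

As the hunks below show, the replacement is mechanical: NULL becomes nullptr in code, "NULL" becomes "null" in comments and strings, and copyright years are bumped to 2023.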

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,13 +59,13 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
assert(info != NULL, "must have info");
assert(info != nullptr, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _index(index), _array(), _throw_index_out_of_bounds_exception(true) {
assert(info != NULL, "must have info");
assert(info != nullptr, "must have info");
_info = new CodeEmitInfo(info);
}
@@ -345,7 +345,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
address target = nullptr;
relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -200,7 +200,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
int LIR_Assembler::emit_exception_handler() {
address handler_base = __ start_a_stub(exception_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
bailout("exception handler overflow");
return -1;
}
@@ -240,7 +240,7 @@ int LIR_Assembler::emit_unwind_handler() {
__ verify_not_null_oop(Rexception_obj);
// Perform needed unlocking
MonitorExitStub* stub = NULL;
MonitorExitStub* stub = nullptr;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::R0_opr);
stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
@@ -253,7 +253,7 @@ int LIR_Assembler::emit_unwind_handler() {
__ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
// Emit the slow path assembly
if (stub != NULL) {
if (stub != nullptr) {
stub->emit_code(this);
}
@@ -263,7 +263,7 @@ int LIR_Assembler::emit_unwind_handler() {
int LIR_Assembler::emit_deopt_handler() {
address handler_base = __ start_a_stub(deopt_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
bailout("deopt handler overflow");
return -1;
}
@@ -402,13 +402,13 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
CodeEmitInfo* info, bool wide) {
assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL),"cannot handle otherwise");
assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == nullptr),"cannot handle otherwise");
__ mov(Rtemp, 0);
int null_check_offset = code_offset();
__ str(Rtemp, as_Address(dest->as_address_ptr()));
if (info != NULL) {
if (info != nullptr) {
assert(false, "arm32 didn't support this before, investigate if bug");
add_debug_info_for_null_check(null_check_offset, info);
}
@@ -496,7 +496,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
Register base_reg = to_addr->base()->as_pointer_register();
const bool needs_patching = (patch_code != lir_patch_none);
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (needs_patching) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
}
@@ -547,7 +547,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
__ add(Rtemp, base_reg, to_addr->index()->as_register());
base_reg = Rtemp;
__ str(from_lo, Address(Rtemp));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -556,7 +556,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
__ str(from_hi, Address(Rtemp, BytesPerWord));
} else if (base_reg == from_lo) {
__ str(from_hi, as_Address_hi(to_addr));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_high, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -565,7 +565,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
__ str(from_lo, as_Address_lo(to_addr));
} else {
__ str(from_lo, as_Address_lo(to_addr));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -605,11 +605,11 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
ShouldNotReachHere();
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check(null_check_offset, info);
}
if (patch != NULL) {
if (patch != nullptr) {
// Offset embedded into LDR/STR instruction may appear not enough
// to address a field. So, provide a space for one more instruction
// that will deal with larger offsets.
@@ -698,11 +698,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
Register base_reg = addr->base()->as_pointer_register();
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
@@ -756,7 +756,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
__ add(Rtemp, base_reg, addr->index()->as_register());
base_reg = Rtemp;
__ ldr(to_lo, Address(Rtemp));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -765,7 +765,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
__ ldr(to_hi, Address(Rtemp, BytesPerWord));
} else if (base_reg == to_lo) {
__ ldr(to_hi, as_Address_hi(addr));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_high, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -774,7 +774,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
__ ldr(to_lo, as_Address_lo(addr));
} else {
__ ldr(to_lo, as_Address_lo(addr));
if (patch != NULL) {
if (patch != nullptr) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
@@ -814,7 +814,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
ShouldNotReachHere();
}
if (patch != NULL) {
if (patch != nullptr) {
// Offset embedded into LDR/STR instruction may appear not enough
// to address a field. So, provide a space for one more instruction
// that will deal with larger offsets.
@@ -861,10 +861,10 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
if (op->block() != NULL) _branch_target_blocks.append(op->block());
if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
assert(op->info() == NULL, "CodeEmitInfo?");
assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
if (op->block() != nullptr) _branch_target_blocks.append(op->block());
if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
assert(op->info() == nullptr, "CodeEmitInfo?");
#endif // ASSERT
#ifdef __SOFTFP__
@@ -1021,9 +1021,9 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for checkcast");
assert(data != nullptr, "need data for checkcast");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
// The offset is large so bias the mdo by the base of the slot so
@@ -1036,7 +1036,7 @@ void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
Register obj, Register mdo, Register data_val, Label* obj_is_null) {
assert(method != NULL, "Should have method");
assert(method != nullptr, "Should have method");
assert_different_registers(obj, mdo, data_val);
setup_md_access(method, bci, md, data, mdo_offset_bias);
Label not_null;
@@ -1511,7 +1511,7 @@ static int reg_size(LIR_Opr op) {
#endif
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
assert(info == NULL, "unused on this code path");
assert(info == nullptr, "unused on this code path");
assert(dest->is_register(), "wrong items state");
if (right->is_address()) {
@@ -1788,12 +1788,12 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
break;
}
case T_OBJECT:
assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
assert(opr2->as_constant_ptr()->as_jobject() == nullptr, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
case T_METADATA:
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
assert(opr2->as_constant_ptr()->as_metadata() == NULL, "cannot handle otherwise");
assert(opr2->as_constant_ptr()->as_metadata() == nullptr, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
default:
@@ -1941,13 +1941,13 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size());
if (stub == NULL) {
if (stub == nullptr) {
BAILOUT("static call stub overflow");
}
DEBUG_ONLY(int offset = code_offset();)
InlinedMetadata metadata_literal(NULL);
InlinedMetadata metadata_literal(nullptr);
__ relocate(static_stub_Relocation::spec(call_pc));
// If not a single instruction, NativeMovConstReg::next_instruction_address()
// must jump over the whole following ldr_literal.
@@ -2101,11 +2101,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
// If we don't know anything or it's an object array, just go through the generic arraycopy
if (default_type == NULL) {
if (default_type == nullptr) {
// save arguments, because they will be killed by a runtime call
save_in_reserved_area(R0, R1, R2, R3);
@@ -2114,7 +2114,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment
address copyfunc_addr = StubRoutines::generic_arraycopy();
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
#ifndef PRODUCT
if (PrintC1Statistics) {
__ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
@@ -2139,12 +2139,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
return;
}
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(),
"must be true at this point");
int elem_size = type2aelembytes(basic_type);
int shift = exact_log2(elem_size);
// Check for NULL
// Check for null
if (flags & LIR_OpArrayCopy::src_null_check) {
if (flags & LIR_OpArrayCopy::dst_null_check) {
__ cmp(src, 0);
@@ -2252,7 +2252,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// get here
assert_different_registers(tmp, tmp2, R6, altFP_7_11);
__ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
__ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == nullptr ? stub->entry() : &slow, nullptr);
__ mov(R6, R0);
__ mov(altFP_7_11, R1);
@@ -2263,7 +2263,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov(R0, R6);
__ mov(R1, altFP_7_11);
if (copyfunc_addr != NULL) { // use stub if available
if (copyfunc_addr != nullptr) { // use stub if available
// src is not a sub class of dst so we have to do a
// per-element check.
@@ -2432,7 +2432,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register lock = op->lock_opr()->as_pointer_register();
if (UseHeavyMonitors) {
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj);
}
@@ -2440,7 +2440,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
} else if (op->code() == lir_unlock) {
@@ -2456,7 +2456,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
Register result = op->result_opr()->as_pointer_register();
CodeEmitInfo* info = op->info();
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
@@ -2474,9 +2474,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_register(), "tmp1 must be allocated");
@@ -2502,7 +2502,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert_different_registers(mdo, tmp1, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
@@ -2531,7 +2531,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
if (receiver == nullptr) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
__ mov_metadata(tmp1, known_klass->constant_encoding());
@@ -2634,7 +2634,7 @@ void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_cod
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
assert(!tmp->is_valid(), "don't need temporary");
__ call(dest);
if (info != NULL) {
if (info != nullptr) {
add_call_info_here(info);
}
}
@@ -2679,7 +2679,7 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
}
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check(null_check_offset, info);
}
}
@@ -2746,11 +2746,11 @@ void LIR_Assembler::peephole(LIR_List* lir) {
// moves from illegalOpr should be removed when converting LIR to native assembly
LIR_Op2* cmp = op->as_Op2();
assert(cmp != NULL, "cmp LIR instruction is not an op2");
assert(cmp != nullptr, "cmp LIR instruction is not an op2");
if (i + 1 < inst_length) {
LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
if (cmove != NULL && cmove->code() == lir_cmove) {
if (cmove != nullptr && cmove->code() == lir_cmove) {
LIR_Opr cmove_res = cmove->result_opr();
bool res_is_op1 = cmove_res == cmp->in_opr1();
bool res_is_op2 = cmove_res == cmp->in_opr2();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,13 +124,13 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
bool LIRGenerator::can_inline_as_constant(Value v) const {
if (v->type()->as_IntConstant() != NULL) {
if (v->type()->as_IntConstant() != nullptr) {
return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
} else if (v->type()->as_ObjectConstant() != NULL) {
} else if (v->type()->as_ObjectConstant() != nullptr) {
return v->type()->as_ObjectConstant()->value()->is_null_object();
} else if (v->type()->as_FloatConstant() != NULL) {
} else if (v->type()->as_FloatConstant() != nullptr) {
return v->type()->as_FloatConstant()->value() == 0.0f;
} else if (v->type()->as_DoubleConstant() != NULL) {
} else if (v->type()->as_DoubleConstant() != nullptr) {
return v->type()->as_DoubleConstant()->value() == 0.0;
}
return false;
@@ -412,7 +412,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
LIR_Opr lock = new_pointer_register();
LIR_Opr hdr = new_pointer_register();
CodeEmitInfo* info_for_exception = NULL;
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
@@ -440,15 +440,15 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
address runtime_func = NULL;
address runtime_func = nullptr;
ValueTag tag = x->type()->tag();
if (tag == floatTag) {
runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
} else if (tag == doubleTag) {
runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
}
if (runtime_func != NULL) {
set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
if (runtime_func != nullptr) {
set_result(x, call_runtime(x->x(), runtime_func, x->type(), nullptr));
return;
}
#endif // __SOFTFP__
@@ -514,7 +514,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
#endif // __SOFTFP__
}
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), nullptr);
set_result(x, result);
}
@@ -528,7 +528,7 @@ void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, Cod
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
CodeEmitInfo* info = NULL;
CodeEmitInfo* info = nullptr;
if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
info = state_for(x);
}
@@ -557,7 +557,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
ShouldNotReachHere();
return;
}
LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), nullptr);
set_result(x, result);
break;
}
@@ -568,7 +568,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
left.load_item();
right.load_item();
rlock_result(x);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
break;
}
default:
@@ -658,7 +658,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {
}
if (count.is_constant()) {
assert(count.type()->as_IntConstant() != NULL, "should be");
assert(count.type()->as_IntConstant() != nullptr, "should be");
count.dont_load_item();
} else {
count.load_item();
@@ -712,7 +712,7 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
default:
ShouldNotReachHere();
}
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), nullptr);
set_result(x, result);
#else // __SOFTFP__
LIRItem left(x->x(), this);
@@ -829,10 +829,10 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
LIR_Opr result;
if (x->number_of_arguments() == 1) {
result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
result = call_runtime(x->argument_at(0), runtime_func, x->type(), nullptr);
} else {
assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), nullptr);
}
set_result(x, result);
}
@@ -921,12 +921,12 @@ void LIRGenerator::do_Convert(Convert* x) {
LIRItem value(x->value(), this);
value.load_item();
LIR_Opr reg = rlock_result(x);
__ convert(x->op(), value.result(), reg, NULL);
__ convert(x->op(), value.result(), reg, nullptr);
return;
}
}
LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), nullptr);
set_result(x, result);
}
@@ -986,7 +986,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
length.load_item_force(FrameMap::R2_opr); // R2 is required by runtime call in NewObjectArrayStub::emit_code
LIR_Opr len = length.result();
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
}
@@ -1015,14 +1015,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims();
int i = dims->length();
LIRItemList* items = new LIRItemList(i, i, NULL);
LIRItemList* items = new LIRItemList(i, i, nullptr);
while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Need to get the info before, as the items may become invalid through item_free
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
@@ -1069,7 +1069,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
patching_info = state_for(x, x->state_before());
}
@@ -1082,11 +1082,11 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
CodeStub* stub;
if (x->is_incompatible_class_change_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
LIR_OprFact::illegalOpr, info_for_exception);
} else if (x->is_invokespecial_receiver_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new DeoptimizeStub(info_for_exception,
Deoptimization::Reason_class_check,
Deoptimization::Action_none);
@@ -1107,7 +1107,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
}
@@ -1205,7 +1205,7 @@ void LIRGenerator::do_soft_float_compare(If* x) {
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
// Call float compare function, returns (1,0) if true or false.
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, nullptr);
__ cmp(lir_cond_equal, result,
compare_to_zero ?
LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
@@ -1295,7 +1295,7 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) {
if (value->is_double_cpu()) {
assert(address->index()->is_illegal(), "should have a constant displacement");
LIR_Address* store_addr = NULL;
LIR_Address* store_addr = nullptr;
if (address->disp() != 0) {
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);
@@ -1314,7 +1314,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
if (result->is_double_cpu()) {
assert(address->index()->is_illegal(), "should have a constant displacement");
LIR_Address* load_addr = NULL;
LIR_Address* load_addr = nullptr;
if (address->disp() != 0) {
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -263,7 +263,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// Load displaced header and object from the lock
ldr(hdr, Address(disp_hdr, mark_offset));
// If hdr is NULL, we've got recursive locking and there's nothing more to do
// If hdr is null, we've got recursive locking and there's nothing more to do
cbz(hdr, done);
// load object

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,6 @@
void unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
#endif // CPU_ARM_C1_MACROASSEMBLER_ARM_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -288,7 +288,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
// Save registers, if required.
OopMapSet* oop_maps = new OopMapSet();
OopMap* oop_map = NULL;
OopMap* oop_map = nullptr;
switch (id) {
case forward_exception_id: {
@@ -379,7 +379,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
oop_maps->add_gc_map(call_offset, oop_map);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ cmp_32(R0, 0);
@@ -401,7 +401,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
const bool must_gc_arguments = true;
const bool dont_gc_arguments = false;
OopMapSet* oop_maps = NULL;
OopMapSet* oop_maps = nullptr;
bool save_fpu_registers = HaveVFP;
switch (id) {
@@ -664,7 +664,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers_without_return(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
}
break;
@@ -710,7 +710,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
restore_live_registers_without_return(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
}
break;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -141,7 +141,7 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
Label done;
ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
// If hdr is NULL, we've got recursive locking and there's nothing more to do
// If hdr is null, we've got recursive locking and there's nothing more to do
cmp(Rmark, 0);
b(done, eq);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,21 +42,21 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// set (empty), R9
// b -1
if (mark == NULL) {
if (mark == nullptr) {
mark = cbuf.insts_mark(); // get mark within main instrs section
}
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
if (base == NULL) {
return NULL; // CodeBuffer::expand failed
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed
}
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark));
InlinedMetadata object_literal(NULL);
InlinedMetadata object_literal(nullptr);
// single instruction, see NativeMovConstReg::next_instruction_address() in
// CompiledStaticCall::set_to_interpreted()
__ ldr_literal(Rmethod, object_literal);
@@ -103,7 +103,7 @@ int CompiledStaticCall::to_interp_stub_size() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
guarantee(stub != nullptr, "stub not found");
if (TraceICs) {
ResourceMark rm;
@@ -130,7 +130,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(stub != nullptr, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
@@ -150,7 +150,7 @@ void CompiledDirectStaticCall::verify() {
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ThawBase::patch_pd(frame& f, const frame& caller) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
template<typename FKind>
static inline intptr_t** link_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline int ContinuationHelper::frame_align_words(int size) {
@@ -42,7 +42,7 @@ inline int ContinuationHelper::frame_align_words(int size) {
inline intptr_t* ContinuationHelper::frame_align_pointer(intptr_t* sp) {
Unimplemented();
return NULL;
return nullptr;
}
template<typename FKind>
@@ -75,18 +75,18 @@ inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
inline intptr_t** ContinuationHelper::Frame::callee_link_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
template<typename FKind>
static inline intptr_t* real_fp(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline address* ContinuationHelper::InterpretedFrame::return_pc_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, const frame& caller) {
@@ -95,12 +95,12 @@ inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, cons
inline address* ContinuationHelper::Frame::return_pc_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline address ContinuationHelper::Frame::real_pc(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) {
@@ -109,22 +109,22 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) {
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, InterpreterOopMap* mask) { // inclusive; this will be copied with the frame
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_bottom(const frame& f) { // exclusive; this will not be copied with the frame
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, int callee_argsize, bool callee_interpreted) {
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::callers_sp(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
#endif // CPU_ARM_CONTINUATIONHELPER_ARM_INLINE_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
// the perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) {
static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
return here;
}

View File

@@ -72,7 +72,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
bool fp_safe = thread->is_in_stack_range_incl(fp, sp);
if (_cb != NULL ) {
if (_cb != nullptr ) {
// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod
@@ -96,8 +96,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
return fp_safe && is_entry_frame_valid(thread);
}
intptr_t* sender_sp = NULL;
address sender_pc = NULL;
intptr_t* sender_sp = nullptr;
address sender_pc = nullptr;
if (is_interpreted_frame()) {
// fp must be safe
@@ -124,7 +124,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
if (sender_pc == nullptr || sender_blob == nullptr) {
return false;
}
@@ -210,7 +210,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ((address) this->fp()[return_addr_offset] == NULL) return false;
if ((address) this->fp()[return_addr_offset] == nullptr) return false;
// could try and do some more potential verification of native frame if we could think of some...
@@ -230,7 +230,7 @@ void frame::patch_pc(Thread* thread, address pc) {
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
// leave _pc as is
@@ -300,7 +300,7 @@ void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@@ -308,7 +308,7 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
if (jfa->last_Java_pc() != NULL) {
if (jfa->last_Java_pc() != nullptr) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
return fr;
}
@@ -358,8 +358,8 @@ void frame::adjust_unextended_sp() {
// simplest way to tell whether we are returning to such a call site
// is as follows:
CompiledMethod* sender_cm = (_cb == NULL) ? NULL : _cb->as_compiled_method_or_null();
if (sender_cm != NULL) {
CompiledMethod* sender_cm = (_cb == nullptr) ? nullptr : _cb->as_compiled_method_or_null();
if (sender_cm != nullptr) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
@@ -546,7 +546,7 @@ intptr_t* frame::real_fp() const {
#endif
return new_fp;
}
if (_cb != NULL) {
if (_cb != nullptr) {
// use the frame size if valid
int size = _cb->frame_size();
if (size > 0) {

View File

@@ -33,14 +33,14 @@
// Constructors:
inline frame::frame() {
_pc = NULL;
_sp = NULL;
_unextended_sp = NULL;
_fp = NULL;
_cb = NULL;
_pc = nullptr;
_sp = nullptr;
_unextended_sp = nullptr;
_fp = nullptr;
_cb = nullptr;
_deopt_state = unknown;
_on_heap = false;
_oop_map = NULL;
_oop_map = nullptr;
DEBUG_ONLY(_frame_index = -1;)
}
@@ -53,13 +53,13 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
DEBUG_ONLY(_frame_index = -1;)
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the the compiled method (or must be immediately following it)");
@@ -68,7 +68,7 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
_deopt_state = not_deoptimized;
}
_on_heap = false;
_oop_map = NULL;
_oop_map = nullptr;
}
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
@@ -80,7 +80,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
}
inline frame::frame(intptr_t* sp, intptr_t* fp) {
assert(sp != NULL, "null SP?");
assert(sp != nullptr, "null SP?");
address pc = (address)(sp[-1]);
init(sp, sp, fp, pc);
}
@@ -98,19 +98,19 @@ inline bool frame::equal(frame other) const {
}
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; }
inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL;
return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}
inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }
@@ -159,7 +159,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL ) {
if (last_sp == nullptr ) {
return sp();
} else {
// sp() may have been extended or shrunk by an adapter. At least
@@ -203,13 +203,13 @@ PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop*) map->location(R0->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
return (*result_adr);
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop*) map->location(R0->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj;
}
PRAGMA_DIAG_POP
@@ -219,17 +219,17 @@ inline int frame::frame_size() const {
}
inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;
if (_cb->oop_maps() != NULL) {
if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) {
if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
return NULL;
return nullptr;
}
inline int frame::compiled_frame_stack_argsize() const {
@@ -271,14 +271,14 @@ inline frame frame::sender(RegisterMap* map) const {
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) return sender_for_compiled_frame(map);
if (_cb != nullptr) return sender_for_compiled_frame(map);
assert(false, "should not be called for a C frame");
return frame();
}
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// frame owned by optimizing compiler
assert(_cb->frame_size() > 0, "must have non-zero frame size");
@@ -296,7 +296,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (_cb->oop_maps() != NULL) {
if (_cb->oop_maps() != nullptr) {
OopMapSet::update_register_map(this, map);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,11 +210,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
__ b(done, eq);
// crosses regions, storing NULL?
// crosses regions, storing null?
__ cbz(new_val, done);
// storing region crossing non-NULL, is card already dirty?
// storing region crossing non-null, is card already dirty?
const Register card_addr = tmp1;
__ mov_address(tmp2, (address)ct->byte_map_base());
@@ -230,7 +230,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ ldrb(tmp2, Address(card_addr));
__ cbz(tmp2, done);
// storing a region crossing, non-NULL oop, card is clean.
// storing a region crossing, non-null oop, card is clean.
// dirty card and log.
__ strb(__ zero_register(tmp2), Address(card_addr));
@@ -476,7 +476,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
assert(CardTable::dirty_card_val() == 0, "adjust this code");
__ cbz(r_tmp2, done);
// storing region crossing non-NULL, card is clean.
// storing region crossing non-null, card is clean.
// dirty card and log.
assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -206,7 +206,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
Register tmp0 = Rtemp;
Register tmp1 = R5; // must be callee-save register
if (bs_nm == NULL) {
if (bs_nm == nullptr) {
return;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
//
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls past to check cast
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@ void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entr
{ Label L;
ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
cbz(Rtemp, L);
stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
stop("InterpreterMacroAssembler::call_VM_helper: last_sp != nullptr");
bind(L);
}
#endif // ASSERT
@@ -160,7 +160,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret() {
const Register thread_state = R2_tmp;
ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
cbz(thread_state, L); // if (thread->jvmti_thread_state() == NULL) exit;
cbz(thread_state, L); // if (thread->jvmti_thread_state() == nullptr) exit;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
@@ -1028,7 +1028,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter");
Label set_mdp;
// Test MDO to avoid the call if it is NULL.
// Test MDO to avoid the call if it is null.
ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
cbz(Rtemp, set_mdp);
@@ -1360,7 +1360,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
// In the fall-through case, we found no matching receiver, but we
// observed the receiver[start_row] is NULL.
// observed the receiver[start_row] is null.
// Fill in the receiver field and increment the count.
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));

View File

@@ -144,7 +144,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void empty_expression_stack() {
ldr(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
check_stack_top();
// NULL last_sp until next java call
// null last_sp until next java call
str(zero_register(Rtemp), Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -263,7 +263,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
virtual void pass_object() {
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
*_to++ = (*(intptr_t*)from_addr == 0) ? (intptr_t)NULL : from_addr;
*_to++ = (*(intptr_t*)from_addr == 0) ? (intptr_t)nullptr : from_addr;
_from -= Interpreter::stackElementSize;
}
@@ -306,9 +306,9 @@ class SlowSignatureHandler: public NativeSignatureIterator {
virtual void pass_object() {
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
if(_last_gp < GPR_PARAMS) {
_toGP[_last_gp++] = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
_toGP[_last_gp++] = (*(intptr_t*)from_addr == 0) ? nullptr : from_addr;
} else {
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
*_to++ = (*(intptr_t*)from_addr == 0) ? nullptr : from_addr;
}
_from -= Interpreter::stackElementSize;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,10 @@ public:
void clear(void) {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_last_Java_fp = nullptr;
_last_Java_pc = nullptr;
}
void copy(JavaFrameAnchor* src) {
@@ -50,11 +50,11 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data
//
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing
//
if (_last_Java_sp != src->_last_Java_sp)
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
_last_Java_fp = src->_last_Java_fp;
_last_Java_pc = src->_last_Java_pc;

View File

@@ -38,8 +38,8 @@
#define BUFFER_SIZE 120
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char* name = NULL;
address slow_case_addr = NULL;
const char* name = nullptr;
address slow_case_addr = nullptr;
switch (type) {
case T_BOOLEAN:
name = "jni_fast_GetBooleanField";
@@ -220,7 +220,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
ShouldNotReachHere();
return NULL;
return nullptr;
}
address JNI_FastGetField::generate_fast_get_boolean_field() {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
return NULL;
return nullptr;
}
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {

View File

@@ -107,8 +107,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register temp_reg3,
Label& L_success) {
Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, temp_reg2, &L_success, &L_failure, NULL);
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, temp_reg2, temp_reg3, &L_success, NULL);
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, temp_reg2, &L_success, &L_failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, temp_reg2, temp_reg3, &L_success, nullptr);
bind(L_failure);
};
@@ -125,10 +125,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in the batch");
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
@@ -205,9 +205,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in the batch");
// a couple of useful fields in sub_klass:
int ss_offset = in_bytes(Klass::secondary_supers_offset());
@@ -630,7 +630,7 @@ void MacroAssembler::mov_oop(Register rd, jobject o, int oop_index,
AsmCondition cond
) {
if (o == NULL) {
if (o == nullptr) {
mov(rd, 0, cond);
return;
}
@@ -651,7 +651,7 @@ void MacroAssembler::mov_oop(Register rd, jobject o, int oop_index,
}
void MacroAssembler::mov_metadata(Register rd, Metadata* o, int metadata_index) {
if (o == NULL) {
if (o == nullptr) {
mov(rd, 0);
return;
}
@@ -842,7 +842,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
block_comment(buffer);
}
#endif
const char* msg_buffer = NULL;
const char* msg_buffer = nullptr;
{
ResourceMark rm;
stringStream ss;
@@ -884,7 +884,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
if (!VerifyOops) return;
const char* msg_buffer = NULL;
const char* msg_buffer = nullptr;
{
ResourceMark rm;
stringStream ss;
@ -940,7 +940,7 @@ void MacroAssembler::null_check(Register reg, Register tmp, int offset) {
if (tmp == noreg) {
tmp = Rtemp;
assert((! Thread::current()->is_Compiler_thread()) ||
(! (ciEnv::current()->task() == NULL)) ||
(! (ciEnv::current()->task() == nullptr)) ||
(! (ciEnv::current()->comp_level() == CompLevel_full_optimization)),
"Rtemp not available in C2"); // explicit tmp register required
// XXX: could we mark the code buffer as not compatible with C2 ?
@ -1104,7 +1104,7 @@ void MacroAssembler::debug(const char* msg, const intx* registers) {
}
void MacroAssembler::unimplemented(const char* what) {
const char* buf = NULL;
const char* buf = nullptr;
{
ResourceMark rm;
stringStream ss;
@ -1249,7 +1249,7 @@ void MacroAssembler::cas_for_lock_release(Register oldval, Register newval,
// Preserves flags and all registers.
// On SMP the updated value might not be visible to external observers without a synchronization barrier
void MacroAssembler::cond_atomic_inc32(AsmCondition cond, int* counter_addr) {
if (counter_addr != NULL) {
if (counter_addr != nullptr) {
InlinedAddress counter_addr_literal((address)counter_addr);
Label done, retry;
if (cond != al) {
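cond_atomic_inc32 emits a load/increment/store-conditional retry loop around the counter. In portable C++ the same shape is a compare-exchange loop; this is a sketch of the idea, not the emitted ARM code (std::atomic::fetch_add would be the one-liner, but the explicit loop mirrors the ldrex/strex structure):

#include <atomic>

// Atomically increment a 32-bit counter, tolerating a null counter address.
// compare_exchange_weak reloads 'old' on failure, so the loop just retries.
void cond_atomic_inc32(std::atomic<int>* counter_addr) {
  if (counter_addr == nullptr) return;  // nothing wired up: no-op
  int old = counter_addr->load(std::memory_order_relaxed);
  while (!counter_addr->compare_exchange_weak(old, old + 1,
                                              std::memory_order_relaxed)) {
    // 'old' now holds the freshly observed value; retry the increment
  }
}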
@ -1286,7 +1286,7 @@ void MacroAssembler::resolve_jobject(Register value,
assert_different_registers(value, tmp1, tmp2);
Label done, tagged, weak_tagged;
cbz(value, done); // Use NULL as-is.
cbz(value, done); // Use null as-is.
tst(value, JNIHandles::tag_mask); // Test for tag.
b(tagged, ne);
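The cbz/tst pair implements pointer tagging: a zero jobject passes through as null, and non-zero low bits mark handles that need resolution. A hedged C++ rendering, where the concrete mask value is an assumption standing in for JNIHandles::tag_mask:

#include <cstdint>

constexpr uintptr_t kTagMask = 0x3;  // assumed two-bit tag in the low bits

// True when the value needs tag-specific resolution; zero (null) is
// used as-is, exactly like the cbz above.
bool needs_resolution(uintptr_t value) {
  if (value == 0) return false;    // use null as-is
  return (value & kTagMask) != 0;  // tagged: global or weak-global handle
}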
@ -1319,7 +1319,7 @@ void MacroAssembler::resolve_global_jobject(Register value,
assert_different_registers(value, tmp1, tmp2);
Label done;
cbz(value, done); // Use NULL as-is.
cbz(value, done); // Use null as-is.
#ifdef ASSERT
{

View File

@ -55,7 +55,7 @@ class AddressLiteral {
// creation
AddressLiteral()
: _is_lval(false),
_target(NULL)
_target(nullptr)
{}
public:
@ -285,7 +285,7 @@ public:
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// One of the three labels can be null, meaning take the fall-through.
// No registers are killed, except temp_regs.
void check_klass_subtype_fast_path(Register sub_klass,
Register super_klass,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,7 +194,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all require an extra argument.
__ should_not_reach_here(); // empty stubs make SG sick
return NULL;
return nullptr;
}
// Rmethod: Method*
@ -485,8 +485,8 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* saved_bp,
oop mh) {
// called as a leaf from native code: do not block the JVM!
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
bool has_mh = (strstr(adaptername, "/static") == nullptr &&
strstr(adaptername, "linkTo") == nullptr); // static linkers don't have MH
intptr_t* entry_sp = (intptr_t*) &saved_regs[trace_mh_nregs]; // just after the saved regs
intptr_t* saved_sp = (intptr_t*) saved_regs[Rsender_sp->encoding()]; // save of Rsender_sp
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
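The two strstr probes classify adapters purely by name; as plain C++ (same names, same behavior):

#include <cstring>

// Static linkers and linkTo* adapters carry no MethodHandle receiver,
// so the tracer must not try to print one.
bool has_mh_receiver(const char* adaptername) {
  return strstr(adaptername, "/static") == nullptr &&
         strstr(adaptername, "linkTo") == nullptr;
}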
@ -501,7 +501,7 @@ void trace_method_handle_stub(const char* adaptername,
adaptername, mh_reg_name, mh_reg,
(intptr_t)entry_sp, (intptr_t)saved_sp - (intptr_t)entry_sp, (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp && last_sp != NULL) {
if (last_sp != saved_sp && last_sp != nullptr) {
log_info(methodhandles)("*** last_sp=" INTPTR_FORMAT, p2i(last_sp));
}
LogTarget(Trace, methodhandles) lt;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -134,12 +134,12 @@ intptr_t NativeMovConstReg::data() const {
void NativeMovConstReg::set_data(intptr_t x, address pc) {
// Find and replace the oop corresponding to this instruction in oops section
RawNativeInstruction* next = next_raw();
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
oop* oop_addr = nullptr;
Metadata** metadata_addr = nullptr;
CodeBlob* cb = CodeCache::find_blob(instruction_address());
if (cb != NULL) {
if (cb != nullptr) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
RelocIterator iter(nm, instruction_address(), next->instruction_address());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
@ -162,7 +162,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
unsigned int hi = (unsigned int)(x >> 16);
this->set_encoding((this->encoding() & 0xfff0f000) | (lo & 0xf000) << 4 | (lo & 0xfff));
next->set_encoding((next->encoding() & 0xfff0f000) | (hi & 0xf000) << 4 | (hi & 0xfff));
} else if (oop_addr == NULL & metadata_addr == NULL) {
} else if (oop_addr == nullptr && metadata_addr == nullptr) {
// A static ldr_literal (without oop or metadata relocation)
assert(is_ldr_literal(), "must be");
int offset = ldr_offset();
@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
// data is loaded from oop or metadata section
int offset;
address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
if (pc == 0) {
offset = addr - instruction_address() - 8;
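The two set_encoding calls splice a 16-bit immediate into the ARM movw/movt encoding, which splits it into imm4 (bits 19:16) and imm12 (bits 11:0). A standalone sketch of that bit surgery:

#include <cstdint>

// Patch the 16-bit immediate of a movw/movt instruction word: clear the
// imm4:imm12 fields, then splice the new immediate in.
uint32_t patch_mov16(uint32_t insn, uint16_t imm) {
  return (insn & 0xfff0f000u)
       | ((uint32_t)(imm & 0xf000) << 4)  // imm4  -> bits 19:16
       | (imm & 0x0fffu);                 // imm12 -> bits 11:0
}

// set_data patches the movw with the low half of x and the movt with the
// high half, as above:
//   movw = patch_mov16(movw, (uint16_t)(x & 0xffff));
//   movt = patch_mov16(movt, (uint16_t)((uint32_t)x >> 16));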
@ -303,9 +303,9 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
static address raw_call_for(address return_address) {
CodeBlob* cb = CodeCache::find_blob(return_address);
nmethod* nm = cb->as_nmethod_or_null();
if (nm == NULL) {
if (nm == nullptr) {
ShouldNotReachHere();
return NULL;
return nullptr;
}
// Look back 4 instructions, to allow for ic_call
address begin = MAX2(return_address - 4*NativeInstruction::instruction_size, nm->code_begin());
@ -324,16 +324,16 @@ static address raw_call_for(address return_address) {
}
}
}
return NULL;
return nullptr;
}
bool RawNativeCall::is_call_before(address return_address) {
return (raw_call_for(return_address) != NULL);
return (raw_call_for(return_address) != nullptr);
}
NativeCall* rawNativeCall_before(address return_address) {
address call = raw_call_for(return_address);
assert(call != NULL, "must be");
assert(call != nullptr, "must be");
return nativeCall_at(call);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -448,7 +448,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) {
return nop;
}
return NULL;
return nullptr;
}
class NativeDeoptInstruction: public NativeInstruction {
@ -464,7 +464,7 @@ public:
void verify();
static bool is_deopt_at(address instr) {
assert(instr != NULL, "");
assert(instr != nullptr, "");
uint32_t value = *(uint32_t *) instr;
return value == 0xe7fdecfa;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,10 +31,10 @@
private:
// This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format.
// Since there is none, we just return NULL.
// Since there is none, we just return null.
// See registerMap_sparc.hpp for an example of grabbing registers
// from register save areas of a standard layout.
address pd_location(VMReg reg) const {return NULL;}
address pd_location(VMReg reg) const {return nullptr;}
address pd_location(VMReg base_reg, int slot_idx) const {
return location(base_reg->next(slot_idx), nullptr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ address Relocation::pd_call_destination(address orig_addr) {
address pc = addr();
int adj = 0;
if (orig_addr != NULL) {
if (orig_addr != nullptr) {
// We just moved this call instruction from orig_addr to addr().
// This means that, when relative, its target will appear to have grown by addr() - orig_addr.
adj = orig_addr - pc;
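The adj term compensates for the move: a pc-relative target decoded at the new address appears shifted by exactly the distance the code moved. As arithmetic, under the stated assumption that the instruction moved from orig_addr to pc:

#include <cstdint>

typedef unsigned char* address;

// A pc-relative field encodes (target - pc). After moving the instruction
// from orig_pc to new_pc, decoding at new_pc yields target + (new_pc - orig_pc),
// so adding adj = orig_pc - new_pc recovers the original target.
address original_target(address decoded_at_new_pc, address new_pc, address orig_pc) {
  intptr_t adj = orig_pc - new_pc;
  return decoded_at_new_pc + adj;
}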
@ -69,7 +69,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return rawNativeJump_at(pc)->jump_destination(adj);
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
void Relocation::pd_set_call_destination(address x) {

View File

@ -256,7 +256,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on arm");
assert(regs2 == nullptr, "not needed on arm");
int slot = 0;
int ireg = 0;
@ -367,7 +367,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
int total_args_passed) {
#ifdef __SOFTFP__
// soft float is the same as the C calling convention.
return c_calling_convention(sig_bt, regs, NULL, total_args_passed);
return c_calling_convention(sig_bt, regs, nullptr, total_args_passed);
#endif // __SOFTFP__
int slot = 0;
int ireg = 0;
@ -771,7 +771,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
(OopMapSet*)nullptr);
}
// Arguments for JNI method include JNIEnv and Class if static
@ -796,7 +796,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
out_sig_bt[argc++] = in_sig_bt[i];
}
int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Since object arguments need to be wrapped, we must preserve space
// for those object arguments which come in registers (GPR_PARAMS maximum)
@ -875,7 +875,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sub_slow(SP, SP, stack_size - 2*wordSize);
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
assert(bs != NULL, "Sanity");
assert(bs != nullptr, "Sanity");
bs->nmethod_entry_barrier(masm);
int frame_complete = __ pc() - start;
@ -1397,7 +1397,7 @@ void SharedRuntime::generate_deopt_blob() {
// exception_in_tls_offset entry point.
__ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
__ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));
// Force return value to NULL to avoid confusing the escape analysis
// Force return value to null to avoid confusing the escape analysis
// logic. Everything is dead here anyway.
__ mov(R0, 0);
@ -1732,7 +1732,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ pop(RegisterSet(FP) | RegisterSet(PC));
masm->flush();
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, 2 /* LR+FP */);
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, nullptr, 2 /* LR+FP */);
}
#endif // COMPILER2
@ -1744,7 +1744,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// a safepoint.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
ResourceMark rm;
CodeBuffer buffer("handler_blob", 256, 256);
@ -1814,7 +1814,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
}
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
ResourceMark rm;
CodeBuffer buffer(name, 1000, 512);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ public:
inline address location(VMReg reg, intptr_t* sp) const {
Unimplemented();
return NULL;
return nullptr;
}
inline void set_location(VMReg reg, address loc) { assert_is_rfp(reg); }
@ -77,7 +77,7 @@ public:
bool should_skip_missing() const { return false; }
VMReg find_register_spilled_here(void* p, intptr_t* sp) {
Unimplemented();
return NULL;
return nullptr;
}
void print() const { print_on(tty); }
void print_on(outputStream* st) const { st->print_cr("Small register map"); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,25 +46,25 @@ inline frame StackChunkFrameStream<frame_kind>::to_frame() const {
template <ChunkFrames frame_kind>
inline address StackChunkFrameStream<frame_kind>::get_pc() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::fp() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::derelativize(int offset) const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::unextended_sp_for_interpreter_frame() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>

View File

@ -816,7 +816,7 @@ class StubGenerator: public StubCodeGenerator {
__ str_32(tmp2, Address(tmp1));
// make sure object is 'reasonable'
__ cbz(oop, exit); // if obj is NULL it is ok
__ cbz(oop, exit); // if obj is null it is ok
// Check if the oop is in the right area of memory
// Note: oop_mask and oop_bits must be updated if the code is saved/reused
@ -830,7 +830,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure klass is 'reasonable'
__ load_klass(klass, oop); // get klass
__ cbz(klass, error); // if klass is NULL it is broken
__ cbz(klass, error); // if klass is null it is broken
// return if everything seems ok
__ bind(exit);
@ -870,11 +870,11 @@ class StubGenerator: public StubCodeGenerator {
// input registers are preserved
//
void array_overlap_test(address no_overlap_target, int log2_elem_size, Register tmp1, Register tmp2) {
assert(no_overlap_target != NULL, "must be generated");
array_overlap_test(no_overlap_target, NULL, log2_elem_size, tmp1, tmp2);
assert(no_overlap_target != nullptr, "must be generated");
array_overlap_test(no_overlap_target, nullptr, log2_elem_size, tmp1, tmp2);
}
void array_overlap_test(Label& L_no_overlap, int log2_elem_size, Register tmp1, Register tmp2) {
array_overlap_test(NULL, &L_no_overlap, log2_elem_size, tmp1, tmp2);
array_overlap_test(nullptr, &L_no_overlap, log2_elem_size, tmp1, tmp2);
}
void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size, Register tmp1, Register tmp2) {
const Register from = R0;
@ -892,12 +892,12 @@ class StubGenerator: public StubCodeGenerator {
if (log2_elem_size != 0) {
__ mov(byte_count, AsmOperand(count, lsl, log2_elem_size));
}
if (NOLp == NULL)
if (NOLp == nullptr)
__ b(no_overlap_target,lo);
else
__ b((*NOLp), lo);
__ cmp(to_from, byte_count);
if (NOLp == NULL)
if (NOLp == nullptr)
__ b(no_overlap_target, ge);
else
__ b((*NOLp), ge);
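The subs/cmp pair reduces the overlap question to one comparison chain: a forward (ascending) copy is safe when the destination lies below the source, or when the regions are fully disjoint. A C++ sketch of the predicate the two branches encode:

#include <cstdint>
#include <cstddef>

// Mirrors array_overlap_test:
//   'lo' after subs       -> to < from        -> copying downward, forward loop safe
//   to_from >= byte_count -> regions disjoint -> forward loop safe
bool forward_copy_safe(uintptr_t from, uintptr_t to, size_t byte_count) {
  if (to < from) return true;          // branch taken on 'lo'
  return (to - from) >= byte_count;    // branch taken on 'ge'
}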
@ -1974,7 +1974,7 @@ class StubGenerator: public StubCodeGenerator {
return &SharedRuntime::_jlong_array_copy_ctr;
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
#endif // !PRODUCT
@ -1998,7 +1998,7 @@ class StubGenerator: public StubCodeGenerator {
// to: R1
// count: R2 treated as signed 32-bit int
//
address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = NULL) {
address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = nullptr) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@ -2016,7 +2016,7 @@ class StubGenerator: public StubCodeGenerator {
__ zap_high_non_significant_bits(R2);
if (!disjoint) {
assert (nooverlap_target != NULL, "must be specified for conjoint case");
assert (nooverlap_target != nullptr, "must be specified for conjoint case");
array_overlap_test(nooverlap_target, exact_log2(bytes_per_count), tmp1, tmp2);
}
@ -2172,7 +2172,7 @@ class StubGenerator: public StubCodeGenerator {
// to: R1
// count: R2 treated as signed 32-bit int
//
address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = NULL) {
address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = nullptr) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@ -2191,7 +2191,7 @@ class StubGenerator: public StubCodeGenerator {
__ zap_high_non_significant_bits(R2);
if (!disjoint) {
assert (nooverlap_target != NULL, "must be specified for conjoint case");
assert (nooverlap_target != nullptr, "must be specified for conjoint case");
array_overlap_test(nooverlap_target, LogBytesPerHeapOop, tmp1, tmp2);
}
@ -2508,7 +2508,7 @@ class StubGenerator: public StubCodeGenerator {
// ======== loop entry is here ========
__ BIND(load_element);
__ load_heap_oop(R5, Address(from, BytesPerHeapOop, post_indexed)); // load the oop
__ cbz(R5, store_element); // NULL
__ cbz(R5, store_element); // null
__ load_klass(R6, R5);
@ -2640,20 +2640,20 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
BLOCK_COMMENT("arraycopy initial argument checks");
// if (src == NULL) return -1;
// if (src == nullptr) return -1;
__ cbz(src, L_failed);
// if (src_pos < 0) return -1;
__ cmp_32(src_pos, 0);
__ b(L_failed, lt);
// if (dst == NULL) return -1;
// if (dst == nullptr) return -1;
__ cbz(dst, L_failed);
// if (dst_pos < 0) return -1;
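The commented checks read directly as C++; a hypothetical rendering with a stand-in Array type (the real stub reads the arrayOop length field, and the klass checks (5) and (6) are omitted here):

struct Array { int length; };  // stand-in for an array oop; only length matters

// Returns 0 on success, -1 on any failed check, like the stub's L_failed path.
int arraycopy_checks(const Array* src, int src_pos,
                     const Array* dst, int dst_pos, int length) {
  if (src == nullptr || dst == nullptr) return -1;          // null src or dst
  if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;  // (2) (3) (4)
  if ((unsigned)src_pos + (unsigned)length > (unsigned)src->length) return -1; // (7)
  if ((unsigned)dst_pos + (unsigned)length > (unsigned)dst->length) return -1; // (8)
  return 0;
}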
@ -3140,7 +3140,7 @@ class StubGenerator: public StubCodeGenerator {
generate_arraycopy_stubs();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
StubRoutines::Arm::_method_entry_barrier = generate_method_entry_barrier();
}
@ -3171,7 +3171,7 @@ class StubGenerator: public StubCodeGenerator {
#define UCM_TABLE_MAX_ENTRIES 32
void StubGenerator_generate(CodeBuffer* code, int phase) {
if (UnsafeCopyMemory::_table == NULL) {
if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, phase);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,11 +27,11 @@
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
address StubRoutines::Arm::_idiv_irem_entry = NULL;
address StubRoutines::Arm::_idiv_irem_entry = nullptr;
address StubRoutines::Arm::_partial_subtype_check = NULL;
address StubRoutines::Arm::_partial_subtype_check = nullptr;
address StubRoutines::_atomic_load_long_entry = NULL;
address StubRoutines::_atomic_store_long_entry = NULL;
address StubRoutines::_atomic_load_long_entry = nullptr;
address StubRoutines::_atomic_store_long_entry = nullptr;
address StubRoutines::Arm::_method_entry_barrier = NULL;
address StubRoutines::Arm::_method_entry_barrier = nullptr;

View File

@ -122,9 +122,9 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
}
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
if (!InlineIntrinsics) return nullptr; // Generate a vanilla entry
address entry_point = NULL;
address entry_point = nullptr;
Register continuation = LR;
bool use_runtime_call = false;
switch (kind) {
@ -181,7 +181,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
ShouldNotReachHere();
}
if (entry_point != NULL) {
if (entry_point != nullptr) {
__ mov(SP, Rsender_sp);
if (use_runtime_call) {
__ mov(Rtmp_save0, LR);
@ -227,7 +227,7 @@ void TemplateInterpreterGenerator::generate_math_runtime_call(AbstractInterprete
break;
default:
ShouldNotReachHere();
fn = NULL; // silence "maybe uninitialized" compiler warnings
fn = nullptr; // silence "maybe uninitialized" compiler warnings
}
__ call_VM_leaf(fn);
}
@ -307,7 +307,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();
InlinedString Lname(name);
@ -327,7 +327,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
if (pass_oop) {
__ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
} else {
if (message != NULL) {
if (message != nullptr) {
__ ldr_literal(R2, Lmessage);
} else {
__ mov(R2, 0);
@ -341,7 +341,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
__ nop(); // to avoid filling CPU pipeline with invalid instructions
__ nop();
__ bind_literal(Lname);
if (!pass_oop && (message != NULL)) {
if (!pass_oop && (message != nullptr)) {
__ bind_literal(Lmessage);
}
@ -355,7 +355,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Restore stack bottom in case i2c adjusted stack
__ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that SP is now tos until next java call
// and null it as marker that SP is now tos until next java call
__ mov(Rtemp, (int)NULL_WORD);
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
@ -389,7 +389,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
// The stack is not extended by deopt but we must NULL last_sp as this
// The stack is not extended by deopt but we must null last_sp as this
// entry is like a "return".
__ mov(Rtemp, 0);
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
@ -408,7 +408,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ bind(L);
}
if (continuation == NULL) {
if (continuation == nullptr) {
__ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
@ -497,8 +497,8 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
// indicating if the counter overflow occurs at a backwards branch (non-null bcp).
// The call returns the address of the verified entry point for the method or null
// if the compilation did not complete (either went background or bailed out).
__ mov(R1, (int)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
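So the runtime call has a simple contract: it returns the verified entry point, or null when no compiled code is available yet. A stubbed C++ sketch of the caller's side (stand-in names, not the InterpreterRuntime signature):

#include <cstdio>

typedef unsigned char* address;

// Stand-in: the real call asks the runtime to compile the method and returns
// the verified entry point, or null if compilation did not complete.
static address frequency_counter_overflow_stub(bool /*backedge*/) { return nullptr; }

static void on_counter_overflow(bool at_backedge) {
  address entry = frequency_counter_overflow_stub(at_backedge);
  if (entry == nullptr) {
    puts("no nmethod yet: keep interpreting");  // went background or bailed out
    return;
  }
  // otherwise continue in compiled code at 'entry'
}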
@ -757,7 +757,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
const int referent_offset = java_lang_ref_Reference::referent_offset();
// Check if local 0 != NULL
// Check if local 0 != nullptr
// If the receiver is null then it is OK to jump to the slow path.
__ ldr(Rthis, Address(Rparams));
__ cbz(Rthis, slow_path);
@ -780,9 +780,9 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
}
// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return nullptr; }
//
// Interpreter stub for calling a native method. (asm interpreter)
@ -1430,7 +1430,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ b(L_done, ne);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
// get local0
__ ldr(R1, Address(Rlocals, 0));
@ -1621,7 +1621,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// Call a little run-time stub to avoid blow-up for each bytecode.
// The run-time stub saves the right registers, depending on
// the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL,
assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated");
address trace_entry = Interpreter::trace_code(t->tos_in());
__ call(trace_entry, relocInfo::none);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,7 +166,7 @@ AsmCondition convNegCond(TemplateTable::Condition cc) {
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// Store an oop (or null) at the address described by obj.
// Blows all volatile registers (R0-R3, Rtemp, LR).
// Also destroys new_val and obj.base().
static void do_oop_store(InterpreterMacroAssembler* _masm,
@ -462,7 +462,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ resolve_oop_handle(tmp);
__ cmp(result, tmp);
__ b(notNull, ne);
__ mov(result, 0); // NULL object reference
__ mov(result, 0); // null object reference
__ bind(notNull);
}
@ -1224,7 +1224,7 @@ void TemplateTable::aastore() {
// Compute the array base
__ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// do array store check - check for NULL value first
// do array store check - check for null value first
__ cbz(Rvalue_2, is_null);
// Load subklass
@ -1251,11 +1251,11 @@ void TemplateTable::aastore() {
// object is at TOS
__ b(Interpreter::_throw_ArrayStoreException_entry);
// Have a NULL in Rvalue_2, store NULL at array[index].
// Have a null in Rvalue_2, store null at array[index].
__ bind(is_null);
__ profile_null_seen(R0_tmp);
// Store a NULL
// Store a null
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
// Pop stack arguments
@ -2121,7 +2121,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ sub(R1, Rbcp, Rdisp); // branch bcp
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
// R0: osr nmethod (osr ok) or NULL (osr not possible)
// R0: osr nmethod (osr ok) or null (osr not possible)
const Register Rnmethod = R0;
__ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode
@ -2675,14 +2675,14 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
__ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
__ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
if (is_static) {
__ mov(R1, 0); // NULL object reference
__ mov(R1, 0); // null object reference
} else {
__ pop(atos); // Get the object
__ mov(R1, R0_tos);
__ verify_oop(R1);
__ push(atos); // Restore stack state
}
// R1: object pointer or NULL
// R1: object pointer or null
// R2: cache entry pointer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
R1, R2);
@ -2991,7 +2991,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool
// object (tos)
__ mov(R3, Rstack_top);
// R1: object pointer set up above (NULL if static)
// R1: object pointer set up above (null if static)
// R2: cache entry pointer
// R3: value object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
@ -4065,7 +4065,7 @@ void TemplateTable::checkcast() {
// Come here on success
// Collect counts on whether this check-cast sees NULLs a lot or not.
// Collect counts on whether this check-cast sees nulls a lot or not.
if (ProfileInterpreter) {
__ b(done);
__ bind(is_null);
@ -4078,8 +4078,8 @@ void TemplateTable::checkcast() {
void TemplateTable::instanceof() {
// result = 0: obj == NULL or obj is not an instanceof the specified klass
// result = 1: obj != NULL and obj is an instanceof the specified klass
// result = 0: obj == nullptr or obj is not an instanceof the specified klass
// result = 1: obj != nullptr and obj is an instanceof the specified klass
transition(atos, itos);
Label done, is_null, not_subtype, quicked, resolved;
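The result convention above, written out in C++ with a stand-in subtype test (the real check goes through the secondary-supers machinery):

struct Klass;
struct Oop { const Klass* klass; };

// Simplified exact-type stand-in for the subtype check.
static bool is_instance_of(const Oop* obj, const Klass* k) {
  return obj->klass == k;
}

// 0: obj is null or not an instance; 1: obj is a non-null instance.
int instanceof_result(const Oop* obj, const Klass* k) {
  return (obj != nullptr && is_instance_of(obj, k)) ? 1 : 0;
}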
@ -4136,7 +4136,7 @@ void TemplateTable::instanceof() {
__ profile_typecheck_failed(R1_tmp);
__ mov(R0_tos, 0);
// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ b(done);
__ bind(is_null);
@ -4211,7 +4211,7 @@ void TemplateTable::monitorenter() {
const Register Robj = R0_tos;
const Register Rentry = R1_tmp;
// check for NULL object
// check for null object
__ null_check(Robj, Rtemp);
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
@ -4219,7 +4219,7 @@ void TemplateTable::monitorenter() {
Label allocate_monitor, allocated;
// initialize entry pointer
__ mov(Rentry, 0); // points to free slot or NULL
__ mov(Rentry, 0); // points to free slot or null
// find a free slot in the monitor block (result in Rentry)
{ Label loop, exit;
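The scan that follows keeps Rentry pointing at a free slot (one whose obj field is null) while walking the monitor block. A C++ sketch with a hypothetical slot layout:

// Hypothetical monitor slot: 'obj' is null when the slot is free.
struct BasicObjectLock { void* obj; };

// Remember the last free slot seen; the result 'points to free slot or
// null', matching the Rentry comment above.
BasicObjectLock* find_free_slot(BasicObjectLock* begin, BasicObjectLock* end) {
  BasicObjectLock* free_slot = nullptr;
  for (BasicObjectLock* cur = begin; cur != end; ++cur) {
    if (cur->obj == nullptr) free_slot = cur;
  }
  return free_slot;
}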
@ -4322,7 +4322,7 @@ void TemplateTable::monitorexit() {
const Register Rcur_obj = Rtemp;
const Register Rmonitor = R0; // fixed in unlock_object()
// check for NULL object
// check for null object
__ null_check(Robj, Rtemp);
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,7 +140,7 @@ void VM_Version::initialize() {
// Making this stub must be FIRST use of assembler
const int stub_size = 128;
BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
if (stub_blob == NULL) {
if (stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,9 +51,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
@ -121,9 +121,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
@ -203,7 +203,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// We force resolving of the call site by jumping to the "handle
// wrong method" stub, and so let the interpreter runtime do all the
// dirty work.
assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
__ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);
masm->flush();