8301497: Replace NULL with nullptr in cpu/s390

Reviewed-by: amitkumar, coleenp
Author: Johan Sjölen
Date:   2023-04-18 08:59:31 +00:00
Parent: 8ecb5dfa34
Commit: 54f7b6ca34
44 changed files with 452 additions and 452 deletions
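
Background for this change (not part of the patch itself): NULL is a macro that expands to an integer null-pointer constant (typically 0 or 0L), so it can silently take part in integer overload resolution and template deduction, while nullptr is a C++11 keyword with its own type, std::nullptr_t, that only converts to pointer types. A minimal, self-contained sketch of the difference; the dispatch() functions are hypothetical names chosen for illustration:

    #include <cstddef>
    #include <type_traits>

    static int dispatch(int)   { return 1; }  // would match an integer argument
    static int dispatch(char*) { return 2; }  // matches an object pointer

    int main() {
      // nullptr has its own distinct type ...
      static_assert(std::is_same<decltype(nullptr), std::nullptr_t>::value,
                    "nullptr is of type std::nullptr_t");
      // ... so it can only select the pointer overload, whereas NULL, being an
      // integer constant, may pick dispatch(int) or be reported as ambiguous,
      // depending on how the platform defines the macro.
      return dispatch(nullptr) == 2 ? 0 : 1;
    }

In the straightforward uses below (comparisons, initializations, default arguments) the replacement is behavior-preserving; it simply removes the macro's integer ambiguity and brings the s390 code in line with the rest of HotSpot.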

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -121,17 +121,17 @@ int AbstractInterpreter::size_activation(int max_stack,
//
// Parameters:
//
// interpreter_frame != NULL:
// interpreter_frame isn't null:
// set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the
// The frame interpreter_frame, if not null, is guaranteed to be the
// right size, as determined by a previous call to this method.
// It is also guaranteed to be walkable even though it is in a skeletal state
//
// is_top_frame == true:
// is_top_frame is true:
// We're processing the *oldest* interpreter frame!
//
// pop_frame_extra_args:
// If this is != 0 we are returning to a deoptimized frame by popping
// If this isn't 0 we are returning to a deoptimized frame by popping
// off the callee frame. We want to re-execute the call that called the
// callee interpreted, but since the return to the interpreter would pop
// the arguments off advance the esp by dummy popframe_extra_args slots.

@ -137,7 +137,7 @@ class RelAddr {
assert(((uint64_t)target & 0x0001L) == 0, "target of a relative address must be aligned");
assert(((uint64_t)pc & 0x0001L) == 0, "origin of a relative address must be aligned");
if ((target == NULL) || (target == pc)) {
if ((target == nullptr) || (target == pc)) {
return 0; // Yet unknown branch destination.
} else {
guarantee(is_in_range_of_RelAddr(target, pc, shortForm), "target not within reach");
@ -295,7 +295,7 @@ class AddressLiteral {
protected:
// creation
AddressLiteral() : _address(NULL), _rspec() {}
AddressLiteral() : _address(nullptr), _rspec() {}
public:
AddressLiteral(address addr, RelocationHolder const& rspec)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -375,7 +375,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
address target = nullptr;
relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -143,7 +143,7 @@ void LIR_Assembler::osr_entry() {
for (int i = 0; i < number_of_locks; i++) {
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
// Verify the interpreter's monitor has a non-null object.
__ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__);
__ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__);
// Copy the lock field into the compiled activation.
__ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
__ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
@ -158,7 +158,7 @@ void LIR_Assembler::osr_entry() {
address LIR_Assembler::emit_call_c(address a) {
__ align_call_far_patchable(__ pc());
address call_addr = __ call_c_opt(a);
if (call_addr == NULL) {
if (call_addr == nullptr) {
bailout("const section overflow");
}
return call_addr;
@ -167,7 +167,7 @@ address LIR_Assembler::emit_call_c(address a) {
int LIR_Assembler::emit_exception_handler() {
// Generate code for exception handler.
address handler_base = __ start_a_stub(exception_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
// Not enough space left for the handler.
bailout("exception handler overflow");
return -1;
@ -213,7 +213,7 @@ int LIR_Assembler::emit_unwind_handler() {
}
// Perform needed unlocking.
MonitorExitStub* stub = NULL;
MonitorExitStub* stub = nullptr;
if (method()->is_synchronized()) {
// Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
@ -248,7 +248,7 @@ int LIR_Assembler::emit_unwind_handler() {
__ z_br(Z_R5);
// Emit the slow path assembly.
if (stub != NULL) {
if (stub != nullptr) {
stub->emit_code(this);
}
@ -258,7 +258,7 @@ int LIR_Assembler::emit_unwind_handler() {
int LIR_Assembler::emit_deopt_handler() {
// Generate code for exception handler.
address handler_base = __ start_a_stub(deopt_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
// Not enough space left for the handler.
bailout("deopt handler overflow");
return -1;
@ -273,7 +273,7 @@ int LIR_Assembler::emit_deopt_handler() {
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
if (o == NULL) {
if (o == nullptr) {
__ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
} else {
AddressLiteral a = __ allocate_oop_address(o);
@ -286,12 +286,12 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
// Allocate a new index in table to hold the object once it's been patched.
int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
// The NULL will be dynamically patched later so the sequence to
// The null will be dynamically patched later so the sequence to
// load the address literal must not be optimized.
__ load_const(reg, addrlit);
@ -308,11 +308,11 @@ void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
// Allocate a new index in table to hold the klass once it's been patched.
int index = __ oop_recorder()->allocate_metadata_index(NULL);
int index = __ oop_recorder()->allocate_metadata_index(nullptr);
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc");
// The NULL will be dynamically patched later so the sequence to
// The null will be dynamically patched later so the sequence to
// load the address literal must not be optimized.
__ load_const(reg, addrlit);
@ -353,18 +353,18 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
if (op->block() != NULL) { _branch_target_blocks.append(op->block()); }
if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); }
assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
if (op->block() != nullptr) { _branch_target_blocks.append(op->block()); }
if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); }
#endif
if (op->cond() == lir_cond_always) {
if (op->info() != NULL) { add_debug_info_for_branch(op->info()); }
if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); }
__ branch_optimized(Assembler::bcondAlways, *(op->label()));
} else {
Assembler::branch_condition acond = Assembler::bcondZero;
if (op->code() == lir_cond_float_branch) {
assert(op->ublock() != NULL, "must have unordered successor");
assert(op->ublock() != nullptr, "must have unordered successor");
__ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
}
switch (op->cond()) {
@ -504,7 +504,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
}
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
address virtual_call_oop_addr = NULL;
address virtual_call_oop_addr = nullptr;
AddressLiteral empty_ic((address) Universe::non_oop_word());
virtual_call_oop_addr = __ pc();
bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
@ -546,7 +546,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
case T_OBJECT:
dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
if (c->as_jobject() == NULL) {
if (c->as_jobject() == nullptr) {
__ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
} else {
jobject2reg(c->as_jobject(), Z_R1_scratch);
@ -596,7 +596,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (c->as_jobject() == nullptr) {
if (UseCompressedOops && !wide) {
__ clear_reg(Z_R1_scratch, false);
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
@ -666,7 +666,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (c->as_jobject() == nullptr) {
if (UseCompressedOops && !wide) {
store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
} else {
@ -709,7 +709,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
}
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check(store_offset, info);
}
}
@ -760,7 +760,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
Register toc_reg = Z_R1_scratch;
__ load_toc(toc_reg);
address const_addr = __ float_constant(c->as_jfloat());
if (const_addr == NULL) {
if (const_addr == nullptr) {
bailout("const section overflow");
break;
}
@ -778,7 +778,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
Register toc_reg = Z_R1_scratch;
__ load_toc(toc_reg);
address const_addr = __ double_constant(c->as_jdouble());
if (const_addr == NULL) {
if (const_addr == nullptr) {
bailout("const section overflow");
break;
}
@ -881,7 +881,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P
__ verify_oop(src, FILE_AND_LINE);
}
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (needs_patching) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
assert(!to_reg->is_double_cpu() ||
@ -969,10 +969,10 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P
default : ShouldNotReachHere();
}
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, patch_code, src, info);
}
if (info != NULL) add_debug_info_for_null_check(offset, info);
if (info != nullptr) add_debug_info_for_null_check(offset, info);
}
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
@ -1074,7 +1074,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
__ verify_oop(dest, FILE_AND_LINE);
}
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (needs_patching) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
assert(!from->is_double_cpu() ||
@ -1176,11 +1176,11 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
default: ShouldNotReachHere();
}
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, patch_code, dest, info);
}
if (info != NULL) add_debug_info_for_null_check(offset, info);
if (info != nullptr) add_debug_info_for_null_check(offset, info);
}
@ -1211,7 +1211,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
const Register poll_addr = tmp->as_register_lo();
__ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset()));
guarantee(info != NULL, "Shouldn't be NULL");
guarantee(info != nullptr, "Shouldn't be null");
add_debug_info_for_branch(info);
int offset = __ offset();
__ relocate(relocInfo::poll_type);
@ -1226,7 +1226,7 @@ void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size());
if (stub == NULL) {
if (stub == nullptr) {
bailout("static call stub overflow");
return;
}
@ -1236,7 +1236,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
// See also Matcher::interpreter_method_reg().
AddressLiteral meta = __ allocate_metadata_address(NULL);
AddressLiteral meta = __ allocate_metadata_address(nullptr);
bool success = __ load_const_from_toc(Z_method, meta);
__ set_inst_mark();
@ -1289,10 +1289,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
__ z_cfi(reg1, c->as_jint());
}
} else if (c->type() == T_METADATA) {
// We only need, for now, comparison with NULL for metadata.
// We only need, for now, comparison with null for metadata.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = c->as_metadata();
if (m == NULL) {
if (m == nullptr) {
__ z_cghi(reg1, 0);
} else {
ShouldNotReachHere();
@ -1300,7 +1300,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
} else if (is_reference_type(c->type())) {
// In 64bit oops are single register.
jobject o = c->as_jobject();
if (o == NULL) {
if (o == nullptr) {
__ z_ltgr(reg1, reg1);
} else {
jobject2reg(o, Z_R1_scratch);
@ -1311,7 +1311,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
}
// cpu register - address
} else if (opr2->is_address()) {
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
if (unsigned_comp) {
@ -1449,7 +1449,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
} else if (opr1->is_stack()) {
stack2reg(opr1, result, result->type());
} else if (opr1->is_constant()) {
const2reg(opr1, result, lir_patch_none, NULL);
const2reg(opr1, result, lir_patch_none, nullptr);
} else {
ShouldNotReachHere();
}
@ -1478,7 +1478,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
} else if (opr2->is_stack()) {
stack2reg(opr2, result, result->type());
} else if (opr2->is_constant()) {
const2reg(opr2, result, lir_patch_none, NULL);
const2reg(opr2, result, lir_patch_none, nullptr);
} else {
ShouldNotReachHere();
}
@ -1488,7 +1488,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
CodeEmitInfo* info, bool pop_fpu_stack) {
assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
if (left->is_single_cpu()) {
assert(left == dest, "left and dest must be equal");
@ -1935,14 +1935,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
// If we don't know anything, just go through the generic arraycopy.
if (default_type == NULL) {
if (default_type == nullptr) {
address copyfunc_addr = StubRoutines::generic_arraycopy();
if (copyfunc_addr == NULL) {
if (copyfunc_addr == nullptr) {
// Take a slow path for generic arraycopy.
__ branch_optimized(Assembler::bcondAlways, *stub->entry());
__ bind(*stub->continuation());
@ -2007,7 +2007,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
return;
}
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
int shift_amount;
@ -2037,7 +2037,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// Length and pos's are all sign extended at this point on 64bit.
// test for NULL
// test for null
if (flags & LIR_OpArrayCopy::src_null_check) {
__ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry());
}
@ -2115,7 +2115,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ load_klass(src_klass, src);
__ load_klass(dst_klass, dst);
__ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL);
__ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr);
store_parameter(src_klass, 0); // sub
store_parameter(dst_klass, 1); // super
@ -2127,7 +2127,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ bind(slow);
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
if (copyfunc_addr != NULL) { // use stub if available
if (copyfunc_addr != nullptr) { // use stub if available
// Src is not a sub class of dst so we have to do a
// per-element check.
@ -2456,17 +2456,17 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
assert(!op->tmp3()->is_valid(), "tmp3's not needed");
// Check if it needs to be profiled.
ciMethodData* md = NULL;
ciProfileData* data = NULL;
ciMethodData* md = nullptr;
ciProfileData* data = nullptr;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
assert(method != nullptr, "Should have method");
int bci = op->profiled_bci();
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data != nullptr, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
@ -2527,8 +2527,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ load_klass(klass_RInfo, obj);
// Perform the fast part of the checking logic.
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1,
(need_slow_path ? success_target : NULL),
failure_target, NULL,
(need_slow_path ? success_target : nullptr),
failure_target, nullptr,
RegisterOrConstant(super_check_offset));
if (need_slow_path) {
// Call out-of-line instance of __ check_klass_subtype_slow_path(...):
@ -2572,19 +2572,19 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
CodeStub* stub = op->stub();
// Check if it needs to be profiled.
ciMethodData* md = NULL;
ciProfileData* data = NULL;
ciMethodData* md = nullptr;
ciProfileData* data = nullptr;
assert_different_registers(value, k_RInfo, klass_RInfo);
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
assert(method != nullptr, "Should have method");
int bci = op->profiled_bci();
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data != nullptr, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
NearLabel profile_cast_success, profile_cast_failure, done;
@ -2613,7 +2613,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// Get instance klass (it's already uncompressed).
__ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
// Perform the fast part of the checking logic.
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
// Call out-of-line instance of __ check_klass_subtype_slow_path(...):
address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
store_parameter(klass_RInfo, 0); // sub
@ -2723,7 +2723,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
if (UseHeavyMonitors) {
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj);
}
@ -2731,7 +2731,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// Add debug info for NullPointerException only if one is possible.
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
__ lock_object(hdr, obj, lock, *op->stub()->entry());
@ -2750,7 +2750,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
Register result = op->result_opr()->as_pointer_register();
CodeEmitInfo* info = op->info();
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
@ -2768,9 +2768,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Update counter for all call types.
ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
@ -2786,7 +2786,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert_different_registers(mdo, tmp1, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type.
@ -2811,7 +2811,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// VirtualCallData rather than just the first time.
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
if (receiver == nullptr) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
metadata2reg(known_klass->constant_encoding(), tmp1);
__ z_stg(tmp1, recv_addr);
@ -2865,7 +2865,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
assert(!tmp->is_valid(), "don't need temporary");
emit_call_c(dest);
CHECK_BAILOUT();
if (info != NULL) {
if (info != nullptr) {
add_call_info_here(info);
}
}
@ -2962,7 +2962,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
Label update, next, none, null_seen, init_klass;
bool do_null = !not_null;
bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
assert(do_null || do_update, "why are we here?");
@ -2991,7 +2991,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
if (do_update) {
#ifdef ASSERT
if (exact_klass != NULL) {
if (exact_klass != nullptr) {
__ load_klass(tmp1, tmp1);
metadata2reg(exact_klass->constant_encoding(), tmp2);
__ z_cgr(tmp1, tmp2);
@ -3003,8 +3003,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ z_lg(tmp2, mdo_addr);
if (!no_conflict) {
if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
if (exact_klass != NULL) {
if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
if (exact_klass != nullptr) {
metadata2reg(exact_klass->constant_encoding(), tmp1);
} else {
__ load_klass(tmp1, tmp1);
@ -3027,7 +3027,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass);
}
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
// Already unknown: Nothing to do anymore.
@ -3040,7 +3040,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ z_bru(do_update);
} else {
// There's a single possible klass at this profile point.
assert(exact_klass != NULL, "should be");
assert(exact_klass != nullptr, "should be");
if (TypeEntries::is_type_none(current_klass)) {
metadata2reg(exact_klass->constant_encoding(), tmp1);
__ z_lgr(Z_R0_scratch, tmp2);
@ -3060,7 +3060,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#endif
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
// Already unknown: Nothing to do anymore.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -103,11 +103,11 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
// z/Architecture cannot inline all constants.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
if (v->type()->as_IntConstant() != NULL) {
if (v->type()->as_IntConstant() != nullptr) {
return Immediate::is_simm16(v->type()->as_IntConstant()->value());
} else if (v->type()->as_LongConstant() != NULL) {
} else if (v->type()->as_LongConstant() != nullptr) {
return Immediate::is_simm16(v->type()->as_LongConstant()->value());
} else if (v->type()->as_ObjectConstant() != NULL) {
} else if (v->type()->as_ObjectConstant() != nullptr) {
return v->type()->as_ObjectConstant()->value()->is_null_object();
} else {
return false;
@ -115,9 +115,9 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
}
bool LIRGenerator::can_inline_as_constant(Value i, int bits) const {
if (i->type()->as_IntConstant() != NULL) {
if (i->type()->as_IntConstant() != nullptr) {
return Assembler::is_simm(i->type()->as_IntConstant()->value(), bits);
} else if (i->type()->as_LongConstant() != NULL) {
} else if (i->type()->as_LongConstant() != nullptr) {
return Assembler::is_simm(i->type()->as_LongConstant()->value(), bits);
} else {
return can_store_as_constant(i, as_BasicType(i->type()));
@ -267,7 +267,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// "lock" stores the address of the monitor stack slot, so this is not an oop.
LIR_Opr lock = new_register(T_INT);
CodeEmitInfo* info_for_exception = NULL;
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
info_for_exception = state_for (x);
}
@ -326,7 +326,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
default:
ShouldNotReachHere();
}
LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), nullptr);
set_result(x, result);
} else {
LIR_Opr reg = rlock(x);
@ -387,7 +387,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
info = nullptr;
}
if (x->op() == Bytecodes::_lrem) {
@ -408,7 +408,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
left.load_item();
right.load_nonconstant(32);
rlock_result(x);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
}
}
@ -463,7 +463,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
info = nullptr;
}
if (x->op() == Bytecodes::_irem) {
@ -519,7 +519,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
// If an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary.
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands();
}
@ -558,7 +558,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {
void LIRGenerator::do_LogicOp(LogicOp* x) {
// IF an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary.
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands();
}
@ -659,7 +659,7 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
case vmIntrinsics::_dexp: {
assert(x->number_of_arguments() == 1, "wrong type");
address runtime_entry = NULL;
address runtime_entry = nullptr;
switch (x->id()) {
case vmIntrinsics::_dsin:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
@ -683,14 +683,14 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
ShouldNotReachHere();
}
LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), nullptr);
set_result(x, result);
break;
}
case vmIntrinsics::_dpow: {
assert(x->number_of_arguments() == 2, "wrong type");
address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), nullptr);
set_result(x, result);
break;
}
@ -795,7 +795,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
CodeEmitInfo* info = state_for (x, x->state());
// In case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
// and therefore provide the state before the parameters have been consumed.
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for (x, x->state_before());
}
@ -826,14 +826,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims();
int i = dims->length();
LIRItemList* items = new LIRItemList(i, i, NULL);
LIRItemList* items = new LIRItemList(i, i, nullptr);
while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for (x, x->state_before());
@ -882,7 +882,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// Must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization).
@ -897,10 +897,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
CodeStub* stub;
if (x->is_incompatible_class_change_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
} else if (x->is_invokespecial_receiver_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new DeoptimizeStub(info_for_exception,
Deoptimization::Reason_class_check,
Deoptimization::Action_none);
@ -920,7 +920,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for (x, x->state_before());
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -130,7 +130,7 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
// location (null in the displaced hdr location indicates recursive locking).
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Otherwise we don't care about the result and handle locking via runtime call.
branch_optimized(Assembler::bcondNotZero, slow_case);
@ -146,7 +146,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// Load displaced header.
z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
// If the loaded hdr is NULL we had recursive locking, and we are done.
// If the loaded hdr is null we had recursive locking, and we are done.
z_bre(done);
// Load object.
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -96,6 +96,6 @@
Register preserve3 = noreg) PRODUCT_RETURN;
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
#endif // CPU_S390_C1_MACROASSEMBLER_S390_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -66,10 +66,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
// ARG1 must hold thread address.
z_lgr(Z_ARG1, Z_thread);
address return_pc = NULL;
address return_pc = nullptr;
align_call_far_patchable(this->pc());
return_pc = call_c_opt(entry_point);
assert(return_pc != NULL, "const section overflow");
assert(return_pc != nullptr, "const section overflow");
reset_last_Java_frame();
@ -282,7 +282,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
// deoptmized, return to the deoptimization handler entry that will
// cause re-execution of the current bytecode.
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ z_ltr(Z_RET, Z_RET); // return value == 0
@ -311,7 +311,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
bool save_fpu_registers = true;
// Stub code and info for the different stubs.
OopMapSet* oop_maps = NULL;
OopMapSet* oop_maps = nullptr;
switch (id) {
case forward_exception_id:
{
@ -527,7 +527,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
const Register Rarray_ptr = Z_ARG5; // Current value from cache array.
if (UseCompressedOops) {
assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub");
}
const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
@ -547,7 +547,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ z_lg(Rsubklass, 0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
__ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
__ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);
__ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, nullptr, &miss);
// Match falls through here.
i = 0;
@ -627,7 +627,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
AddressLiteral dest(deopt_blob->unpack_with_reexecution());
__ load_const_optimized(Z_R1_scratch, dest);
__ z_br(Z_R1_scratch);
@ -761,7 +761,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
restore_live_registers(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
__ z_br(Z_R1_scratch);
@ -784,7 +784,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// Save registers if required.
OopMapSet* oop_maps = new OopMapSet();
OopMap* oop_map = NULL;
OopMap* oop_map = nullptr;
Register reg_fp = Z_R1_scratch;
switch (id) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1016,7 +1016,7 @@ unsigned int C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, R
// Return true if the same array.
compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
// Return false if one of them is NULL.
// Return false if one of them is null.
compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,34 +40,34 @@
#undef __
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == NULL) {
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
mark = cbuf.insts_mark();
}
assert(mark != NULL, "mark must not be NULL");
assert(mark != nullptr, "mark must not be null");
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
if (stub == NULL) {
return NULL; // CodeBuffer::expand failed.
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.
}
__ relocate(static_stub_Relocation::spec(mark));
AddressLiteral meta = __ allocate_metadata_address(NULL);
AddressLiteral meta = __ allocate_metadata_address(nullptr);
bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);
__ set_inst_mark();
AddressLiteral a((address)-1);
success = success && __ load_const_from_toc(Z_R1, a);
if (!success) {
return NULL; // CodeCache is full.
return nullptr; // CodeCache is full.
}
__ z_br(Z_R1);
@ -75,7 +75,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
return stub;
#else
ShouldNotReachHere();
return NULL;
return nullptr;
#endif
}
@ -93,7 +93,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
guarantee(stub != nullptr, "stub not found");
if (TraceICs) {
ResourceMark rm;
@ -118,7 +118,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(stub != nullptr, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
@ -138,7 +138,7 @@ void CompiledDirectStaticCall::verify() {
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,7 +80,7 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ThawBase::patch_pd(frame& f, const frame& caller) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@
template<typename FKind>
static inline intptr_t** link_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline int ContinuationHelper::frame_align_words(int size) {
@ -42,7 +42,7 @@ inline int ContinuationHelper::frame_align_words(int size) {
inline intptr_t* ContinuationHelper::frame_align_pointer(intptr_t* sp) {
Unimplemented();
return NULL;
return nullptr;
}
template<typename FKind>
@ -75,18 +75,18 @@ inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
inline intptr_t** ContinuationHelper::Frame::callee_link_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
template<typename FKind>
static inline intptr_t* real_fp(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline address* ContinuationHelper::InterpretedFrame::return_pc_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, const frame& caller) {
@ -95,12 +95,12 @@ inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, cons
inline address* ContinuationHelper::Frame::return_pc_address(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline address ContinuationHelper::Frame::real_pc(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) {
@ -109,22 +109,22 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) {
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, InterpreterOopMap* mask) { // inclusive; this will be copied with the frame
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_bottom(const frame& f) { // exclusive; this will not be copied with the frame
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, int callee_argsize, bool callee_interpreted) {
Unimplemented();
return NULL;
return nullptr;
}
inline intptr_t* ContinuationHelper::InterpretedFrame::callers_sp(const frame& f) {
Unimplemented();
return NULL;
return nullptr;
}
#endif // CPU_S390_CONTINUATIONHELPER_S390_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@
// the perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL);
static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr);
// platform-specific instruction annotations (like value of loaded constants)
static void annotate(address pc, outputStream* st);

@ -82,7 +82,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code
if (_cb != NULL ) {
if (_cb != nullptr ) {
// First check if the frame is complete and the test is reliable.
// Unfortunately we can only check frame completeness for runtime stubs.
@ -111,7 +111,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// At this point, there still is a chance that fp_safe is false.
// In particular, (fp == NULL) might be true. So let's check and
// In particular, fp might be null. So let's check and
// bail out before we actually dereference from fp.
if (!fp_safe) {
return false;
@ -123,7 +123,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc.
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_blob == NULL) {
if (sender_blob == nullptr) {
return false;
}
@ -196,7 +196,7 @@ intptr_t* frame::interpreter_frame_sender_sp() const {
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// Java frame called from C. Skip all C frames and return top C
// frame of that chunk as the sender.
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@ -208,7 +208,7 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(map->include_argument_oops(), "should be set by clear");
if (jfa->last_Java_pc() != NULL) {
if (jfa->last_Java_pc() != nullptr) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
return fr;
}
@ -249,7 +249,7 @@ void frame::patch_pc(Thread* thread, address pc) {
own_abi()->return_pc = (uint64_t)pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
// assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
_pc = original_pc;
@ -403,12 +403,12 @@ void frame::back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc, u
st->print("#%-3d ", num);
const char* type_name = " ";
const char* function_name = NULL;
const char* function_name = nullptr;
// Detect current frame's frame_type, default to 'C frame'.
frame_type = 0;
CodeBlob* blob = NULL;
CodeBlob* blob = nullptr;
if (Interpreter::contains(current_pc)) {
frame_type = 1;

@ -124,7 +124,7 @@ inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) {
// Accessors
// Return unique id for this frame. The id must have a value where we
// can distinguish identity and younger/older relationship. NULL
// can distinguish identity and younger/older relationship. null
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
@ -134,7 +134,7 @@ inline intptr_t* frame::id(void) const {
// Return true if this frame is older (less recent activation) than
// the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
assert(this->id() != NULL && id != NULL, "NULL frame id");
assert(this->id() != nullptr && id != nullptr, "null frame id");
// Stack grows towards smaller addresses on z/Architecture.
return this->id() > id;
}
@ -304,17 +304,17 @@ inline intptr_t* frame::real_fp() const {
}
inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;
if (_cb->oop_maps() != NULL) {
if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) {
if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
return NULL;
return nullptr;
}
inline int frame::compiled_frame_stack_argsize() const {

@ -107,13 +107,13 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
Label done;
if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
if (on_oop && on_reference && L_handle_null == nullptr) { L_handle_null = &done; }
ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null);
if (on_oop && on_reference) {
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
NULL /* obj */,
nullptr /* obj */,
dst /* pre_val */,
noreg/* preserve */ ,
tmp1, tmp2 /* tmp */,
@ -132,7 +132,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
) {
bool not_null = (decorators & IS_NOT_NULL) != 0,
preloaded = obj == NULL;
preloaded = obj == nullptr;
const Register Robj = obj ? obj->base() : noreg,
Roff = obj ? obj->index() : noreg;
@ -170,7 +170,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
}
}
// Is the previous value NULL?
// Is the previous value null?
// If so, we don't need to record it and we're done.
// Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
// Register contents is preserved across runtime call if caller requests to do so.
@ -181,12 +181,12 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
#endif
} else {
__ z_ltgr(Rpre_val, Rpre_val);
__ z_bre(filtered); // previous value is NULL, so we don't need to record it.
__ z_bre(filtered); // previous value is null, so we don't need to record it.
}
// Decode the oop now. We know it's not NULL.
// Decode the oop now. We know it's not null.
if (Robj != noreg && UseCompressedOops) {
__ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
__ oop_decoder(Rpre_val, Rpre_val, /*maybenullptr=*/false);
}
// OK, it's not filtered, so we'll need to call enqueue.
@ -285,7 +285,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
__ z_bre(filtered);
// Crosses regions, storing NULL?
// Crosses regions, storing null?
if (not_null) {
#ifdef ASSERT
__ z_ltgr(Rnew_val, Rnew_val);
@ -298,7 +298,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
Rnew_val = noreg; // end of lifetime
// Storing region crossing non-NULL, is card already dirty?
// Storing region crossing non-null, is card already dirty?
assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
// Make sure not to use Z_R0 for any of these registers.
Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
@ -320,7 +320,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar.
__ z_bre(filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Storing a region crossing, non-null oop, card is clean.
// Dirty card and log.
__ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val());
@ -380,7 +380,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
// No need for post barrier if storing NULL
// No need for post barrier if storing null
if (val != noreg) {
const Register base = dst.base(),
idx = dst.index();
@ -395,7 +395,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
NearLabel Ldone, Lnot_weak;
__ z_ltgr(tmp1, value);
__ z_bre(Ldone); // Use NULL result as-is.
__ z_bre(Ldone); // Use null result as-is.
__ z_nill(value, ~JNIHandles::tag_mask);
__ z_lg(value, 0, value); // Resolve (untagged) jobject.
@ -404,7 +404,7 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
__ z_braz(Lnot_weak);
__ verify_oop(value, FILE_AND_LINE);
DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF;
g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
g1_write_barrier_pre(masm, decorators, (const Address*)nullptr, value, noreg, tmp1, tmp2, true);
__ bind(Lnot_weak);
__ verify_oop(value, FILE_AND_LINE);
__ bind(Ldone);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -42,7 +42,7 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
bool do_return);
void g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
const Address* obj, // Address of oop or NULL if pre-loaded.
const Address* obj, // Address of oop or null if pre-loaded.
Register Rpre_val, // Ideally, this is a non-volatile register.
Register Rval, // Will be preserved.
Register Rtmp1, // If Rpre_val is volatile, either Rtmp1
@ -65,7 +65,7 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
#endif
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = NULL);
const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = nullptr);
virtual void resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2);
};

@ -53,7 +53,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
case T_OBJECT: {
if (UseCompressedOops && in_heap) {
__ z_llgf(dst, addr);
if (L_handle_null != NULL) { // Label provided.
if (L_handle_null != nullptr) { // Label provided.
__ compareU32_and_branch(dst, (intptr_t)0, Assembler::bcondEqual, *L_handle_null);
__ oop_decoder(dst, dst, false);
} else {
@ -61,7 +61,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
} else {
__ z_lg(dst, addr);
if (L_handle_null != NULL) {
if (L_handle_null != nullptr) {
__ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondEqual, *L_handle_null);
}
}
@ -108,7 +108,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
NearLabel Ldone;
__ z_ltgr(tmp1, value);
__ z_bre(Ldone); // Use NULL result as-is.
__ z_bre(Ldone); // Use null result as-is.
__ z_nill(value, ~JNIHandles::tag_mask);
__ z_lg(value, 0, value); // Resolve (untagged) jobject.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,7 +40,7 @@ public:
Register dst, Register count, bool do_return = false);
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& addr, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = NULL);
const Address& addr, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = nullptr);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& addr, Register val, Register tmp1, Register tmp2, Register tmp3);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -160,7 +160,7 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
// No need for post barrier if storing NULL
// No need for post barrier if storing null
if (val != noreg) {
const Register base = dst.base(),
idx = dst.index();

@ -34,7 +34,7 @@
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast.
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

@ -57,7 +57,7 @@
#endif
void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
assert(entry != NULL, "Entry must have been generated by now");
assert(entry != nullptr, "Entry must have been generated by now");
assert(Rscratch != Z_R0, "Can't use R0 for addressing");
branch_optimized(Assembler::bcondAlways, entry);
}
@ -93,7 +93,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bo
verify_FPU(1, state);
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{ Label OK;
// Check if the frame pointer in Z_fp is correct.
z_cg(Z_fp, 0, Z_SP);
@ -274,7 +274,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
Register jvmti_thread_state = Z_ARG2;
Register tmp = Z_ARG3;
load_and_test_long(jvmti_thread_state, Address(Z_thread, JavaThread::jvmti_thread_state_offset()));
z_bre(L); // if (thread->jvmti_thread_state() == NULL) exit;
z_bre(L); // if (thread->jvmti_thread_state() == nullptr) exit;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
@ -617,7 +617,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
void InterpreterMacroAssembler::verify_esp(Register Resp, Register Rtemp) {
// About to read or write Resp[0].
// Make sure it is not in the monitors or the TOP_IJAVA_FRAME_ABI.
address reentry = NULL;
address reentry = nullptr;
{
// Check if the frame pointer in Z_fp is correct.
@ -995,7 +995,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(NULL);
// monitor->lock()->set_displaced_header(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
@ -1040,7 +1040,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(NULL);
// monitor->lock()->set_displaced_header(nullptr);
// We did not see an unlocked object so try the fast recursive case.
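
The template comments above describe the interpreter's stack-locking fast path. A minimal C++ sketch of that decision structure, using simplified stand-in types (an atomic mark word and a flat BasicLock) rather than HotSpot's real markWord/BasicObjectLock:

#include <atomic>
#include <cstdint>

struct BasicLockSketch { std::atomic<uintptr_t> displaced_header; };

enum class LockResult { owned, recursive, slow_path };

// CAS the unlocked mark word to point at the on-stack monitor; otherwise detect
// the recursive case (mark already points into our own stack), else take the slow path.
LockResult try_lock(std::atomic<uintptr_t>& mark_addr, BasicLockSketch* monitor,
                    uintptr_t stack_end, uintptr_t stack_base) {
  uintptr_t displaced = mark_addr.load() | 0x1;   // assumed "unlocked" bit pattern
  monitor->displaced_header.store(displaced);
  uintptr_t expected = displaced;
  if (mark_addr.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(monitor))) {
    return LockResult::owned;                     // monitor address now in the mark word
  }
  // Recursive case: the current mark is an address within this thread's stack.
  if (expected >= stack_end && expected < stack_base) {
    monitor->displaced_header.store(0);           // set_displaced_header(nullptr)
    return LockResult::recursive;
  }
  return LockResult::slow_path;                   // InterpreterRuntime::monitorenter
}
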
@ -1094,12 +1094,12 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// else {
// template code:
//
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
// monitor->set_obj(nullptr);
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// monitor->set_obj(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(monitor);
@ -1120,9 +1120,9 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
assert_different_registers(monitor, object, displaced_header, current_header);
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
// monitor->set_obj(nullptr);
clear_mem(obj_entry, sizeof(oop));
@ -1134,7 +1134,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// monitor->set_obj(nullptr);
// If we still have a lightweight lock, unlock the object and be done.
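
Correspondingly, a hedged sketch of the unlock template commented above, again with simplified stand-in types:

#include <atomic>
#include <cstdint>

struct InterpMonitorSketch {
  std::atomic<uintptr_t> obj;               // locked object, or 0 once released
  std::atomic<uintptr_t> displaced_header;  // saved mark word, or 0 for a recursive lock
};

// A zero displaced header marks a recursive lock and only needs the obj slot cleared;
// otherwise the saved mark word is CASed back into the object.
bool try_unlock(InterpMonitorSketch* monitor, std::atomic<uintptr_t>& mark_addr) {
  uintptr_t displaced = monitor->displaced_header.load();
  if (displaced == 0) {
    monitor->obj.store(0);                  // recursive unlock: monitor->set_obj(nullptr)
    return true;
  }
  uintptr_t expected = reinterpret_cast<uintptr_t>(monitor);
  if (mark_addr.compare_exchange_strong(expected, displaced)) {
    monitor->obj.store(0);                  // unlocked mark word restored
    return true;
  }
  return false;                             // slow path: InterpreterRuntime::monitorexit
}
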
@ -1176,7 +1176,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Register method = Z_ARG5;
get_method(method);
// Test MDO to avoid the call if it is NULL.
// Test MDO to avoid the call if it is null.
load_and_test_long(mdp, method2_(method, method_data));
z_brz(set_mdp);
@ -1462,7 +1462,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
// In the fall-through case, we found no matching receiver, but we
// observed the receiver[start_row] is NULL.
// observed the receiver[start_row] is null.
// Fill in the receiver field and increment the count.
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
@ -1478,13 +1478,13 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != NULL) {
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != NULL) {
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
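
The state-machine comment above is the most compact description of record_klass_in_profile. The same decision tree, sketched as plain C++ over an assumed row array (row layout and overflow counter are illustrative only):

#include <cstdint>

struct ReceiverRowSketch { uintptr_t rec; uint64_t count; };

// Find the row matching the receiver klass and bump it; otherwise claim the first
// empty row; if all rows are taken, bump the polymorphic overflow count instead.
void record_receiver(ReceiverRowSketch* rows, int nrows, uint64_t& overflow, uintptr_t rec) {
  for (int i = 0; i < nrows; i++) {
    if (rows[i].rec == rec) { rows[i].count++; return; }                       // row[i].incr()
    if (rows[i].rec == 0)   { rows[i].rec = rec; rows[i].count = 1; return; }  // row[i].init(rec)
  }
  overflow++;  // all rows occupied by other receivers: count.incr()
}
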

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -118,10 +118,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
as_Register(int_arg_nr) + Z_ARG1->encoding() : Z_R0;
// The handle for a receiver will never be null.
bool do_NULL_check = offset() != 0 || is_static();
bool do_nullptr_check = offset() != 0 || is_static();
Label do_null;
if (do_NULL_check) {
if (do_nullptr_check) {
__ clear_reg(r, true, false);
__ load_and_test_long(Z_R0, locals_j_arg_at(offset()));
__ z_bre(do_null);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -37,11 +37,11 @@
inline void clear(void) {
// Clearing _last_Java_sp must be first.
OrderAccess::release();
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
// Fence?
OrderAccess::fence();
_last_Java_pc = NULL;
_last_Java_pc = nullptr;
}
inline void set(intptr_t* sp, address pc) {
@ -55,12 +55,12 @@
// In order to make sure the transition state is valid for "this"
// we must clear _last_Java_sp before copying the rest of the new data.
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing.
//
if (_last_Java_sp != src->_last_Java_sp) {
OrderAccess::release();
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
OrderAccess::fence();
}
_last_Java_pc = src->_last_Java_pc;
@ -77,7 +77,7 @@
public:
// We don't have a frame pointer.
intptr_t* last_Java_fp(void) { return NULL; }
intptr_t* last_Java_fp(void) { return nullptr; }
intptr_t* last_Java_sp() const { return _last_Java_sp; }
void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
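
The ordering rules spelled out in clear() and copy() above (invalidate _last_Java_sp first, fence, and only then touch the pc) can be illustrated with a stand-alone sketch that uses std::atomic in place of HotSpot's OrderAccess. This is an analogy for the publication pattern, not the actual JavaFrameAnchor type:

#include <atomic>
#include <cstdint>

struct FrameAnchorSketch {
  std::atomic<intptr_t*> last_Java_sp{nullptr};  // acts as the "anchor is valid" flag
  std::atomic<uintptr_t> last_Java_pc{0};

  void clear() {
    last_Java_sp.store(nullptr, std::memory_order_release);  // invalidate first
    std::atomic_thread_fence(std::memory_order_seq_cst);     // fence, as in the original
    last_Java_pc.store(0, std::memory_order_relaxed);
  }

  void set(intptr_t* sp, uintptr_t pc) {
    last_Java_pc.store(pc, std::memory_order_relaxed);       // write the payload first
    last_Java_sp.store(sp, std::memory_order_release);       // publish last
  }
};

A stack walker that reads a non-null last_Java_sp with acquire semantics is then guaranteed to see the pc stored before it was published.
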

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -58,7 +58,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
default: ShouldNotReachHere();
name = NULL; // unreachable
name = nullptr; // unreachable
}
ResourceMark rm;
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
@ -129,7 +129,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
default: ShouldNotReachHere();
slow_case_addr = NULL; // unreachable
slow_case_addr = nullptr; // unreachable
}
__ load_const_optimized(Robj, slow_case_addr);
__ z_br(Robj); // tail call

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -77,7 +77,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
// Convert JVMCI register indices (as used in oop maps) to HotSpot registers.
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
return NULL;
return nullptr;
}
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {

@ -887,7 +887,7 @@ void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Regi
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
CodeSection* cs = code()->consts();
return (long)((cs != NULL) ? cs->start()-pc() : 0);
return (long)((cs != nullptr) ? cs->start()-pc() : 0);
}
// Implementation on x86/sparc assumes that constant and instruction section are
@ -1142,9 +1142,9 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
// referring to a position-fixed target location.
// If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
assert(addr != NULL, "should not happen");
assert(addr != nullptr, "should not happen");
BLOCK_COMMENT("load_absolute_address:");
if (addr == NULL) {
if (addr == nullptr) {
z_larl(d, pc()); // Dummy emit for size calc.
return;
}
@ -1795,27 +1795,27 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
//===========================================================================
AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->allocate_metadata_index(obj);
RelocationHolder rspec = metadata_Relocation::spec(index);
return AddressLiteral((address)obj, rspec);
}
AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(obj);
RelocationHolder rspec = metadata_Relocation::spec(index);
return AddressLiteral((address)obj, rspec);
}
AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->allocate_oop_index(obj);
return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}
AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}
@ -2172,7 +2172,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
// ARG1 must hold thread address.
z_lgr(Z_ARG1, Z_thread);
address return_pc = NULL;
address return_pc = nullptr;
if (allow_relocation) {
return_pc = call_c(entry_point);
} else {
@ -2377,7 +2377,7 @@ address MacroAssembler::call_c_static(address function_entry) {
address MacroAssembler::call_c_opt(address function_entry) {
bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
_last_calls_return_pc = success ? pc() : NULL;
_last_calls_return_pc = success ? pc() : nullptr;
return _last_calls_return_pc;
}
@ -2571,7 +2571,7 @@ address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_ad
call_far_patchable_size());
Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
@ -2632,7 +2632,7 @@ bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
// Extract poll address from instruction and ucontext.
address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
assert(ucontext != NULL, "must have ucontext");
assert(ucontext != nullptr, "must have ucontext");
ucontext_t* uc = (ucontext_t*) ucontext;
unsigned long z_instruction;
unsigned int ilen = get_instruction(instr_loc, &z_instruction);
@ -2650,7 +2650,7 @@ address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
// Extract poll register from instruction.
@ -2778,7 +2778,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
bind(search);
// Handle IncompatibleClassChangeError.
// If the entry is NULL then we've reached the end of the table
// If the entry is null then we've reached the end of the table
// without finding the expected interface, so throw an exception.
load_and_test_long(itable_interface, Address(itable_entry_addr));
z_bre(no_such_interface);
@ -2945,12 +2945,12 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
NearLabel L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1 ||
(L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
"at most one NULL in the batch, usually");
"at most one null in the batch, usually");
BLOCK_COMMENT("check_klass_subtype_fast_path {");
// If the pointers are equal, we are done (e.g., String[] elements).
@ -3031,9 +3031,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
NearLabel L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in the batch");
const int ss_offset = in_bytes(Klass::secondary_supers_offset());
const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
@ -3100,20 +3100,20 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
NearLabel failure;
BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
&L_success, &failure, NULL);
&L_success, &failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass,
temp1_reg, temp2_reg, &L_success, NULL);
temp1_reg, temp2_reg, &L_success, nullptr);
BIND(failure);
BLOCK_COMMENT("} check_klass_subtype");
}
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
Label L_fallthrough;
if (L_fast_path == NULL) {
if (L_fast_path == nullptr) {
L_fast_path = &L_fallthrough;
} else if (L_slow_path == NULL) {
} else if (L_slow_path == nullptr) {
L_slow_path = &L_fallthrough;
}
@ -3203,10 +3203,10 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
Register zero = temp;
Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL,
// The object's monitor m is unlocked iff m->owner is null,
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
// Try to CAS m->owner from null to current thread.
z_lghi(zero, 0);
// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
@ -3306,7 +3306,7 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja
}
// When returning from calling out from Java mode the frame anchor's
// last_Java_pc will always be set to NULL. It is set here so that
// last_Java_pc will always be set to null. It is set here so that
// if we are doing a call to native (not VM) that we capture the
// known pc and don't have to rely on the native call having a
// standard frame linkage where we can find the pc.
@ -3402,13 +3402,13 @@ void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
bind(ok);
} else {
if (needs_explicit_null_check((intptr_t)offset)) {
// Provoke OS NULL exception if reg = NULL by
// Provoke OS null exception if reg is null by
// accessing M[reg] w/o changing any registers.
z_lg(tmp, 0, reg);
}
// else
// Nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL.
// will provoke OS null exception if reg is null.
}
}
@ -3447,7 +3447,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
current = dst;
}
if (base != NULL) {
if (base != nullptr) {
// Use scaled-down base address parts to match scaled-down klass pointer.
unsigned int base_h = ((unsigned long)base)>>(32+shift);
unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);
@ -3514,7 +3514,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
// This function calculates the size of the code generated by
// decode_klass_not_null(register dst, Register src)
// when (Universe::heap() != NULL). Hence, if the instructions
// when Universe::heap() isn't null. Hence, if the instructions
// it generates change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
address base = CompressedKlassPointers::base();
@ -3522,7 +3522,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
int addbase_size = 0;
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
if (base != NULL) {
if (base != nullptr) {
unsigned int base_h = ((unsigned long)base)>>32;
unsigned int base_l = (unsigned int)((unsigned long)base);
if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
@ -3557,7 +3557,7 @@ void MacroAssembler::decode_klass_not_null(Register dst) {
if (shift != 0) { // Shift required?
z_sllg(dst, dst, shift);
}
if (base != NULL) {
if (base != nullptr) {
unsigned int base_h = ((unsigned long)base)>>32;
unsigned int base_l = (unsigned int)((unsigned long)base);
if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
@ -3604,7 +3604,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
lgr_if_needed(dst, src);
}
if (base != NULL) {
if (base != nullptr) {
unsigned int base_h = ((unsigned long)base)>>32;
unsigned int base_l = (unsigned int)((unsigned long)base);
if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
@ -3679,8 +3679,8 @@ void MacroAssembler::store_klass_gap(Register s, Register d) {
// Rop1 - klass in register, always uncompressed.
// disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
// Rbase - Base address of cKlass in memory.
// maybeNULL - True if Rop1 possibly is a NULL.
void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
// maybenull - True if Rop1 possibly is null.
void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull) {
BLOCK_COMMENT("compare klass ptr {");
@ -3694,7 +3694,7 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
if (base == NULL) {
if (base == nullptr) {
if (shift == 0) {
z_cl(Rop1, disp, Rbase); // Unscaled
} else {
@ -3709,7 +3709,7 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba
Register current = Rop1;
Label done;
if (maybeNULL) { // NULL ptr must be preserved!
if (maybenull) { // null ptr must be preserved!
z_ltgr(Z_R0, current);
z_bre(done);
current = Z_R0;
@ -3812,9 +3812,9 @@ int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
// Rop1 - Oop in register.
// disp - Offset of cOop in memory.
// Rbase - Base address of cOop in memory.
// maybeNULL - True if Rop1 possibly is a NULL.
// maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction.
void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
// maybenull - True if Rop1 possibly is null.
// maybenulltarget - Branch target for a null Rop1, if flow control shall NOT continue with compare instruction.
void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybenull) {
Register Rbase = mem.baseOrR0();
Register Rindex = mem.indexOrR0();
int64_t disp = mem.disp();
@ -3823,7 +3823,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL
address base = CompressedOops::base();
assert(UseCompressedOops, "must be on to call this method");
assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
assert(Universe::heap() != nullptr, "java heap must be initialized to call this method");
assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
assert_different_registers(Rop1, Z_R0);
assert_different_registers(Rop1, Rbase, Z_R1);
@ -3833,7 +3833,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
if (base == NULL) {
if (base == nullptr) {
if (shift == 0) {
z_cl(Rop1, disp, Rindex, Rbase); // Unscaled
} else {
@ -3848,7 +3848,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL
Label done;
int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
if (maybeNULL) { // NULL ptr must be preserved!
if (maybenull) { // null ptr must be preserved!
z_ltgr(Z_R0, Rop1);
z_bre(done);
}
@ -3928,7 +3928,7 @@ void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
//
// only32bitValid is set, if later code only uses the lower 32 bits. In this
// case we must not fix the upper 32 bits.
void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybenull,
Register Rbase, int pow2_offset, bool only32bitValid) {
const address oop_base = CompressedOops::base();
@ -3936,20 +3936,20 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
const bool disjoint = CompressedOops::base_disjoint();
assert(UseCompressedOops, "must be on to call this method");
assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
assert(Universe::heap() != nullptr, "java heap must be initialized to call this encoder");
assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
if (disjoint || (oop_base == NULL)) {
if (disjoint || (oop_base == nullptr)) {
BLOCK_COMMENT("cOop encoder zeroBase {");
if (oop_shift == 0) {
if (oop_base != NULL && !only32bitValid) {
if (oop_base != nullptr && !only32bitValid) {
z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
} else {
lgr_if_needed(Rdst, Rsrc);
}
} else {
z_srlg(Rdst, Rsrc, oop_shift);
if (oop_base != NULL && !only32bitValid) {
if (oop_base != nullptr && !only32bitValid) {
z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
}
}
@ -3963,7 +3963,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
BLOCK_COMMENT("cOop encoder general {");
assert_different_registers(Rdst, Z_R1);
assert_different_registers(Rsrc, Rbase);
if (maybeNULL) {
if (maybenull) {
Label done;
// We reorder shifting and subtracting, so that we can compare
// and shift in parallel:
@ -3990,7 +3990,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
}
assert_different_registers(Rdst, Rbase);
// Check for NULL oop (must be left alone) and shift.
// Check for null oop (must be left alone) and shift.
if (oop_shift != 0) { // Shift out alignment bits
if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code.
@ -4001,7 +4001,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
// z_cghi(Rsrc, 0);
}
} else {
z_ltgr(Rdst, Rsrc); // Move NULL to result register.
z_ltgr(Rdst, Rsrc); // Move null to result register.
}
z_bre(done);
@ -4064,20 +4064,20 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
// - avoid Z_R0 for any of the argument registers.
// - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
// - avoid Z_R1 for Rdst if Rdst == Rbase.
void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase, int pow2_offset) {
const address oop_base = CompressedOops::base();
const int oop_shift = CompressedOops::shift();
const bool disjoint = CompressedOops::base_disjoint();
assert(UseCompressedOops, "must be on to call this method");
assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
assert(Universe::heap() != nullptr, "java heap must be initialized to call this decoder");
assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
"cOop encoder detected bad shift");
// cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
if (oop_base != NULL) {
if (oop_base != nullptr) {
unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
@ -4088,7 +4088,7 @@ void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, R
Label done;
// Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
if (maybeNULL) { // NULL ptr must be preserved!
if (maybenull) { // null ptr must be preserved!
z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code.
z_bre(done);
} else {
@ -4148,9 +4148,9 @@ void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, R
}
if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
// Scale oop and check for NULL.
// Scale oop and check for null.
// Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
if (maybeNULL) { // NULL ptr must be preserved!
if (maybenull) { // null ptr must be preserved!
z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code.
z_bre(done);
} else {
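
Register juggling aside, oop_encoder and oop_decoder implement the usual compressed-oops arithmetic while leaving null alone. A hedged sketch of the transformation itself, with illustrative base and shift values:

#include <cstdint>

// Assumed heap layout parameters for the example.
constexpr uintptr_t oop_base  = 0x800000000ULL;  // stand-in for CompressedOops::base()
constexpr unsigned  oop_shift = 3;               // stand-in for CompressedOops::shift()

// Encode: subtract the base and shift down, but a null oop must stay null.
uint32_t encode_oop(uintptr_t oop) {
  if (oop == 0) return 0;                        // null must be left alone
  return static_cast<uint32_t>((oop - oop_base) >> oop_shift);
}

// Decode: shift back up and add the base, again preserving null.
uintptr_t decode_oop(uint32_t narrow) {
  if (narrow == 0) return 0;
  return (static_cast<uintptr_t>(narrow) << oop_shift) + oop_base;
}

With a zero (or disjoint) base, the null checks and the add/subtract collapse away, which is exactly the cheaper path the assembly above tries first.
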
@ -4428,11 +4428,11 @@ int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
long value = val.value();
address tocPos = long_constant(value);
if (tocPos != NULL) {
if (tocPos != nullptr) {
int tocOffset = (int)(tocPos - code()->consts()->start());
return tocOffset;
}
// Address_constant returned NULL, so no constant entry has been created.
// Address_constant returned null, so no constant entry has been created.
// In that case, we return a "fatal" offset, just in case that subsequently
// generated access code is executed.
return -1;
@ -4446,7 +4446,7 @@ int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
// where x is the address of the constant pool entry.
address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
if (tocPos != NULL) {
if (tocPos != nullptr) {
int tocOffset = (int)(tocPos - code()->consts()->start());
RelocationHolder rsp = oop.rspec();
Relocation *rel = rsp.reloc();
@ -4460,7 +4460,7 @@ int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
return tocOffset;
}
// Address_constant returned NULL, so no constant entry has been created
// Address_constant returned null, so no constant entry has been created
// in that case, we return a "fatal" offset, just in case that subsequently
// generated access code is executed.
return -1;
@ -4470,7 +4470,7 @@ bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Regist
int tocOffset = store_const_in_toc(a);
if (tocOffset == -1) return false;
address tocPos = tocOffset + code()->consts()->start();
assert((address)code()->consts()->start() != NULL, "Please add CP address");
assert((address)code()->consts()->start() != nullptr, "Please add CP address");
relocate(a.rspec());
load_long_pcrelative(dst, tocPos);
return true;
@ -4480,7 +4480,7 @@ bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register
int tocOffset = store_oop_in_toc(a);
if (tocOffset == -1) return false;
address tocPos = tocOffset + code()->consts()->start();
assert((address)code()->consts()->start() != NULL, "Please add CP address");
assert((address)code()->consts()->start() != nullptr, "Please add CP address");
load_addr_pcrelative(dst, tocPos);
return true;
@ -4494,7 +4494,7 @@ intptr_t MacroAssembler::get_const_from_toc(address pc) {
assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
long offset = get_load_const_from_toc_offset(pc);
address dataLoc = NULL;
address dataLoc = nullptr;
if (is_load_const_from_toc_pcrelative(pc)) {
dataLoc = pc + offset;
} else {
@ -4513,12 +4513,12 @@ void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBl
assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
long offset = MacroAssembler::get_load_const_from_toc_offset(pc);
address dataLoc = NULL;
address dataLoc = nullptr;
if (is_load_const_from_toc_pcrelative(pc)) {
dataLoc = pc+offset;
} else {
nmethod* nm = CodeCache::find_nmethod(pc);
assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
assert((cb == nullptr) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
dataLoc = nm->ctable_begin() + offset;
}
if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
@ -5534,7 +5534,7 @@ void MacroAssembler::stop(int type, const char* msg, int id) {
// should be given for "hand-written" code, if all chain calls are in the same code blob.
// Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==nullptr?"init":"cont", allow_relocation?"reloc ":"static", msg));
// Setup arguments.
if (allow_relocation) {
@ -5545,7 +5545,7 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i
load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
load_absolute_address(Z_ARG2, (address)msg);
}
if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
if ((reentry != nullptr) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
BLOCK_COMMENT("branch to reentry point:");
z_brc(bcondAlways, reentry);
} else {
@ -5554,7 +5554,7 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i
save_return_pc(); // Saves return pc Z_R14.
push_frame_abi160(0);
if (allow_relocation) {
reentry = NULL; // Prevent reentry if code relocation is allowed.
reentry = nullptr; // Prevent reentry if code relocation is allowed.
call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
} else {
call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
@ -5569,7 +5569,7 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i
// Special version of stop() for code size reduction.
// Assumes constant relative addresses for data and runtime call.
void MacroAssembler::stop_static(int type, const char* msg, int id) {
stop_chain(NULL, type, msg, id, false);
stop_chain(nullptr, type, msg, id, false);
}
void MacroAssembler::stop_subroutine() {

@ -679,7 +679,7 @@ class MacroAssembler: public Assembler {
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg and temp2_reg.
// If super_check_offset is not -1, temp1_reg is not used and can be noreg.
@ -713,8 +713,8 @@ class MacroAssembler: public Assembler {
void clinit_barrier(Register klass,
Register thread,
Label* L_fast_path = NULL,
Label* L_slow_path = NULL);
Label* L_fast_path = nullptr,
Label* L_slow_path = nullptr);
// Increment a counter at counter_address when the eq condition code is set.
// Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
@ -747,9 +747,9 @@ class MacroAssembler: public Assembler {
// Vm result is currently getting hijacked for oop preservation.
void set_vm_result(Register oop_result);
// Support for NULL-checks
// Support for null-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
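
A short illustration of the null_check policy described above: a known, small offset lets the later access M[reg + offset] itself raise the OS null exception, while anything else needs an explicit probe or compare. The page size is an assumption for the example.

#include <cstdint>

constexpr int64_t assumed_page_size = 4096;

// True if an explicit probing load (or compare) is required; false if the
// subsequent access M[reg + offset] will trap on its own when reg is null.
bool needs_explicit_probe(int64_t offset, bool offset_known) {
  if (!offset_known) return true;                     // must test the register itself
  return offset < 0 || offset >= assumed_page_size;   // outside the guaranteed trap range
}
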
@ -771,7 +771,7 @@ class MacroAssembler: public Assembler {
// This function calculates the size of the code generated by
// decode_klass_not_null(register dst)
// when (Universe::heap() != NULL). Hence, if the instructions
// when Universe::heap() isn't null. Hence, if the instructions
// it generates change, then this method needs to be updated.
static int instr_size_for_decode_klass_not_null();
@ -781,8 +781,8 @@ class MacroAssembler: public Assembler {
static int get_oop_base_pow2_offset(uint64_t oop_base);
int get_oop_base(Register Rbase, uint64_t oop_base);
int get_oop_base_complement(Register Rbase, uint64_t oop_base);
void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);
void compare_heap_oop(Register Rop1, Address mem, bool maybenull);
void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull);
// Access heap oop, handle encoding and GC barriers.
private:
@ -791,20 +791,20 @@ class MacroAssembler: public Assembler {
Register tmp1, Register tmp2, Register tmp3);
void access_load_at(BasicType type, DecoratorSet decorators,
const Address& addr, Register dst,
Register tmp1, Register tmp2, Label *is_null = NULL);
Register tmp1, Register tmp2, Label *is_null = nullptr);
public:
// tmp1 and tmp2 are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF.
void load_heap_oop(Register dest, const Address &a,
Register tmp1, Register tmp2,
DecoratorSet decorators = 0, Label *is_null = NULL);
DecoratorSet decorators = 0, Label *is_null = nullptr);
void store_heap_oop(Register Roop, const Address &a,
Register tmp1, Register tmp2, Register tmp3,
DecoratorSet decorators = 0);
void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
void oop_encoder(Register Rdst, Register Rsrc, bool maybenull,
Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
void oop_decoder(Register Rdst, Register Rsrc, bool maybenull,
Register Rbase = Z_R1, int pow2_offset = -1);
void resolve_oop_handle(Register result);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -71,7 +71,7 @@
// Set this as clone_shift_expressions.
static bool narrow_oop_use_complex_address() {
if (CompressedOops::base() == NULL && CompressedOops::shift() == 0) return true;
if (CompressedOops::base() == nullptr && CompressedOops::shift() == 0) return true;
return false;
}
@ -84,12 +84,12 @@
static bool const_oop_prefer_decode() {
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
return CompressedOops::base() == NULL;
return CompressedOops::base() == nullptr;
}
static bool const_klass_prefer_decode() {
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
return CompressedKlassPointers::base() == NULL;
return CompressedKlassPointers::base() == nullptr;
}
// Is it better to copy float constants, or load them directly from memory?

@ -180,7 +180,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ z_br(target);
__ bind(L_no_such_method);
assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
__ z_br(target);
}
@ -249,14 +249,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// adapters via MethodHandleNatives.linkMethod. They all allow an
// appendix argument.
__ should_not_reach_here(); // Empty stubs make SG sick.
return NULL;
return nullptr;
}
// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ should_not_reach_here(); // Empty stubs make SG sick.
return NULL;
return nullptr;
}
// Z_R10: sender SP (must preserve; see prepare_to_jump_from_interpreted)
@ -559,8 +559,8 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* sender_sp,
intptr_t* args,
intptr_t* tracing_fp) {
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // Static linkers don't have MH.
bool has_mh = (strstr(adaptername, "/static") == nullptr &&
strstr(adaptername, "linkTo") == nullptr); // Static linkers don't have MH.
const char* mh_reg_name = has_mh ? "Z_R4_mh" : "Z_R4";
log_info(methodhandles)("MH %s %s=" INTPTR_FORMAT " sender_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
adaptername, mh_reg_name,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -55,7 +55,7 @@
void NativeInstruction::verify() {
// Make sure code pattern is actually an instruction address.
// Do not allow:
// - NULL
// - null
// - any address in first page (0x0000 .. 0x0fff)
// - odd address (will cause a "specification exception")
address addr = addr_at(0);
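
The rules listed in the comment above reduce to a short predicate. This sketch reproduces only the address checks, not the full instruction-pattern validation that verify() also performs:

#include <cstdint>

// Reject null, anything in the first page (0x0000 .. 0x0fff), and odd addresses
// (an odd instruction address raises a specification exception on s390).
bool plausible_instruction_address(uintptr_t addr) {
  if (addr == 0)          return false;  // null
  if (addr < 0x1000)      return false;  // first page
  if ((addr & 0x1) != 0)  return false;  // odd address
  return true;
}
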
@ -68,7 +68,7 @@ void NativeInstruction::verify() {
// Print location and value (hex representation) of current NativeInstruction
void NativeInstruction::print(const char* msg) const {
int len = Assembler::instr_len(addr_at(0));
if (msg == NULL) { // Output line without trailing blanks.
if (msg == nullptr) { // Output line without trailing blanks.
switch (len) {
case 2: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x", p2i(addr_at(0)), len, halfword_at(0)); break;
case 4: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2)); break;
@ -89,20 +89,20 @@ void NativeInstruction::print(const char* msg) const {
}
}
void NativeInstruction::print() const {
print(NULL);
print(nullptr);
}
// Hex-Dump of storage around current NativeInstruction. Also try disassembly.
void NativeInstruction::dump(const unsigned int range, const char* msg) const {
Assembler::dump_code_range(tty, addr_at(0), range, (msg == NULL) ? "":msg);
Assembler::dump_code_range(tty, addr_at(0), range, (msg == nullptr) ? "":msg);
}
void NativeInstruction::dump(const unsigned int range) const {
dump(range, NULL);
dump(range, nullptr);
}
void NativeInstruction::dump() const {
dump(32, NULL);
dump(32, nullptr);
}
void NativeInstruction::set_halfword_at(int offset, short i) {
@ -176,7 +176,7 @@ bool NativeInstruction::is_sigill_not_entrant() {
// (see implementation of is_illegal() for details).
CodeBlob* cb = CodeCache::find_blob(addr_at(0));
if (cb == NULL || !cb->is_nmethod()) {
if (cb == nullptr || !cb->is_nmethod()) {
return false;
}
@ -255,7 +255,7 @@ void NativeFarCall::verify() {
address NativeFarCall::destination() {
assert(MacroAssembler::is_call_far_patchable_at((address)this), "unexpected call type");
address ctable = NULL;
address ctable = nullptr;
return MacroAssembler::get_dest_of_call_far_patchable_at((address)this, ctable);
}
@ -368,7 +368,7 @@ address NativeMovConstReg::next_instruction_address(int offset) const {
#else
guarantee(false, "Not a NativeMovConstReg site");
#endif
return NULL;
return nullptr;
}
intptr_t NativeMovConstReg::data() const {
@ -385,7 +385,7 @@ intptr_t NativeMovConstReg::data() const {
#else
ShouldNotReachHere();
#endif
return *(intptr_t *)NULL;
return *(intptr_t *)nullptr;
} else {
// Otherwise, assume data resides in TOC. Is asserted in called method.
return MacroAssembler::get_const_from_toc(loc);
@ -481,15 +481,15 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty
address next_address = set_data_plain(data, cb);
// 'RelocIterator' requires an nmethod
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
nmethod* nm = cb ? cb->as_nmethod_or_null() : nullptr;
if (nm != nullptr) {
RelocIterator iter(nm, instruction_address(), next_address);
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
oop* oop_addr = nullptr;
Metadata** metadata_addr = nullptr;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
if (oop_addr == nullptr) {
oop_addr = r->oop_addr();
*oop_addr = cast_to_oop(data);
} else {
@ -498,7 +498,7 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty
}
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation *r = iter.metadata_reloc();
if (metadata_addr == NULL) {
if (metadata_addr == nullptr) {
metadata_addr = r->metadata_addr();
*metadata_addr = (Metadata*)data;
} else {
@ -507,8 +507,8 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty
}
}
assert(expected_type == relocInfo::none ||
(expected_type == relocInfo::metadata_type && metadata_addr != NULL) ||
(expected_type == relocInfo::oop_type && oop_addr != NULL),
(expected_type == relocInfo::metadata_type && metadata_addr != nullptr) ||
(expected_type == relocInfo::oop_type && oop_addr != nullptr),
"%s relocation not found", expected_type == relocInfo::oop_type ? "oop" : "metadata");
}
}
@ -540,7 +540,7 @@ void NativeMovConstReg::set_narrow_klass(intptr_t data) {
ICache::invalidate_range(start, range);
}
void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = NULL */) {
void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = nullptr */) {
address next_address;
address loc = addr_at(0);
@ -565,7 +565,7 @@ void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passe
}
}
void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = NULL */) {
void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = nullptr */) {
address next_address;
address loc = addr_at(0);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -257,7 +257,7 @@ class NativeCall: public NativeInstruction {
((NativeCall*)iaddr)->print();
guarantee(false, "Not a NativeCall site");
return NULL;
return nullptr;
}
address return_address() const {
@ -325,7 +325,7 @@ class NativeCall: public NativeInstruction {
// instruction, is always prepended with a NOP. This measure avoids
// ambiguities with load_const_from_toc_call.
friend NativeCall* nativeCall_before(address return_address) {
NativeCall *call = NULL;
NativeCall *call = nullptr;
// Make sure not to return garbage
address instp = return_address - MacroAssembler::load_const_call_size();
@ -486,8 +486,8 @@ class NativeMovConstReg: public NativeInstruction {
// Patch narrow oop constant in code stream.
void set_narrow_oop(intptr_t data);
void set_narrow_klass(intptr_t data);
void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = NULL);
void set_pcrel_data(intptr_t data, CompiledMethod *nm = NULL);
void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = nullptr);
void set_pcrel_data(intptr_t data, CompiledMethod *nm = nullptr);
void verify();
@ -664,13 +664,13 @@ public:
inline NativePostCallNop* nativePostCallNop_at(address address) {
// Unimplemented();
return NULL;
return nullptr;
}
class NativeDeoptInstruction: public NativeInstruction {
public:
address instruction_address() const { Unimplemented(); return NULL; }
address next_instruction_address() const { Unimplemented(); return NULL; }
address instruction_address() const { Unimplemented(); return nullptr; }
address next_instruction_address() const { Unimplemented(); return nullptr; }
void verify() { Unimplemented(); }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -33,8 +33,8 @@
private:
// This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format.
// Since there is none, we just return NULL.
address pd_location(VMReg reg) const {return NULL;}
// Since there is none, we just return null.
address pd_location(VMReg reg) const {return nullptr;}
address pd_location(VMReg base_reg, int slot_idx) const {
return location(base_reg->next(slot_idx), nullptr);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -99,7 +99,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return (address)(-1);
}
NativeFarCall* call;
if (orig_addr == NULL) {
if (orig_addr == nullptr) {
call = nativeFarCall_at(inst_addr);
} else {
// must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative

@ -114,12 +114,12 @@ void OptoRuntime::generate_exception_blob() {
// Pop the exception blob's C frame that has been pushed before.
__ z_lgr(Z_SP, saved_sp);
// [Z_RET]!=NULL was possible in hotspot5 but not in sapjvm6.
// A non-null [Z_RET] was possible in hotspot5 but not in sapjvm6.
// C2I adapter extensions are now removed by a resize in the frame manager
// (unwind_initial_activation_pending_exception).
#ifdef ASSERT
__ z_ltgr(handle_exception, handle_exception);
__ asm_assert_ne("handler must not be NULL", 0x852);
__ asm_assert_ne("handler must not be null", 0x852);
#endif
// Handle_exception contains the handler address. If the associated frame
@ -145,6 +145,6 @@ void OptoRuntime::generate_exception_blob() {
masm->flush();
// Set exception blob.
OopMapSet *oop_maps = NULL;
OopMapSet *oop_maps = nullptr;
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, frame_size/wordSize);
}

@ -293,7 +293,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg
OopMap* map = new OopMap(frame_size_in_slots, 0);
int regstosave_num = 0;
const RegisterSaver::LiveRegType* live_regs = NULL;
const RegisterSaver::LiveRegType* live_regs = nullptr;
switch (reg_set) {
case all_registers:
@ -398,7 +398,7 @@ OopMap* RegisterSaver::generate_oop_map(MacroAssembler* masm, RegisterSet reg_se
OopMap* map = new OopMap(frame_size_in_slots, 0);
int regstosave_num = 0;
const RegisterSaver::LiveRegType* live_regs = NULL;
const RegisterSaver::LiveRegType* live_regs = nullptr;
switch (reg_set) {
case all_registers:
@ -448,7 +448,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, RegisterSet reg
bool float_spilled = false;
int regstosave_num = 0;
const RegisterSaver::LiveRegType* live_regs = NULL;
const RegisterSaver::LiveRegType* live_regs = nullptr;
switch (reg_set) {
case all_registers:
@ -762,7 +762,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "second VMRegPair array not used on this platform");
assert(regs2 == nullptr, "second VMRegPair array not used on this platform");
// Calling conventions for C runtime calls and calls to JNI native methods.
const VMReg z_iarg_reg[5] = {
@ -1017,7 +1017,7 @@ static void object_move(MacroAssembler *masm,
__ add2reg(rHandle, reg2offset(src.first())+frame_offset, Z_SP);
__ load_and_test_long(Z_R0, Address(rHandle));
__ z_brne(skip);
// Use a NULL handle if oop is NULL.
// Use a null handle if oop is null.
__ clear_reg(rHandle, true, false);
__ bind(skip);
@ -1043,7 +1043,7 @@ static void object_move(MacroAssembler *masm,
__ z_stg(rOop, oop_slot_offset, Z_SP);
__ add2reg(rHandle, oop_slot_offset, Z_SP);
// If Oop == NULL, use a NULL handle.
// If Oop is null, use a null handle.
__ compare64_and_branch(rOop, (RegisterOrConstant)0L, Assembler::bcondNotEqual, skip);
__ clear_reg(rHandle, true, false);
__ bind(skip);
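
Both object_move paths implement the JNI rule sketched below: a null oop is passed as a null handle, while a non-null oop is spilled to a stack slot and passed by the slot's address. Types and names here are illustrative, not the wrapper generator's actual interface.

#include <cstdint>

using oop_t = uintptr_t;

// Spill the oop where the oop map can find it and hand out that slot's address;
// a null oop becomes a null handle instead.
oop_t* make_jni_handle(oop_t oop, oop_t* stack_slot) {
  if (oop == 0) return nullptr;   // if oop is null, use a null handle
  *stack_slot = oop;              // store the oop into the handle slot
  return stack_slot;              // the handle is the address of the spilled oop
}
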
@ -1324,7 +1324,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet *) NULL);
(OopMapSet *) nullptr);
}
@ -1335,7 +1335,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
///////////////////////////////////////////////////////////////////////
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
assert(native_func != nullptr, "must have function");
//---------------------------------------------------------------------
// We have received a description of where all the java args are located
@ -1363,7 +1363,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
BasicType* in_elem_bt = nullptr;
// Create the signature for the C call:
// 1) add the JNIEnv*
@ -1457,7 +1457,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// *_slot_offset indicates offset from SP in #stack slots
// *_offset indicates offset from SP in #bytes
int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/NULL, total_c_args) + // 1+2
int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/nullptr, total_c_args) + // 1+2
SharedRuntime::out_preserve_stack_slots(); // see c_calling_convention
// Now the space for the inbound oop handle area.
@ -2075,7 +2075,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
// Call patching needed?
__ load_and_test_long(Z_R0_scratch, method_(code));
__ z_lg(ientry, method_(interpreter_entry)); // Preload interpreter entry (also if patching).
__ z_brne(patch_callsite); // Patch required if code != NULL (compiled target exists).
__ z_brne(patch_callsite); // Patch required if code isn't null (compiled target exists).
__ bind(skip_fixup); // Return point from patch_callsite.
@ -2358,7 +2358,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc();
// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL;
address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
@ -2510,7 +2510,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
Label exec_mode_initialized;
OopMap* map = NULL;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
unsigned int start_off = __ offset();
@ -2627,7 +2627,7 @@ void SharedRuntime::generate_deopt_blob() {
// occur so we don't need an oopmap. the value of the pc in the
// frame is not particularly important. it just needs to identify the blob.
// Don't set last_Java_pc anymore here (is implicitly NULL then).
// Don't set last_Java_pc anymore here (is implicitly null then).
// the correct PC is retrieved in pd_last_frame() in that case.
__ set_last_Java_frame(/*sp*/Z_SP, noreg);
// With EscapeAnalysis turned on, this call may safepoint
@ -2844,7 +2844,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ z_br(Z_R14);
masm->flush();
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, framesize_in_bytes/wordSize);
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, nullptr, framesize_in_bytes/wordSize);
}
#endif // COMPILER2
@ -2854,7 +2854,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Generate a special Compile2Runtime blob that saves all registers,
// and setup oopmap.
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
assert(StubRoutines::forward_exception_entry() != NULL,
assert(StubRoutines::forward_exception_entry() != nullptr,
"must be generated before");
ResourceMark rm;
@ -2866,7 +2866,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
MacroAssembler* masm = new MacroAssembler(&buffer);
unsigned int start_off = __ offset();
address call_pc = NULL;
address call_pc = nullptr;
int frame_size_in_bytes;
bool cause_return = (poll_type == POLL_AT_RETURN);
@ -2955,7 +2955,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code
ResourceMark rm;
@ -2964,7 +2964,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
MacroAssembler* masm = new MacroAssembler(&buffer);
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL;
OopMap* map = nullptr;
unsigned int start_off = __ offset();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ public:
inline address location(VMReg reg, intptr_t* sp) const {
Unimplemented();
return NULL;
return nullptr;
}
inline void set_location(VMReg reg, address loc) { assert_is_rfp(reg); }
@ -77,7 +77,7 @@ public:
bool should_skip_missing() const { return false; }
VMReg find_register_spilled_here(void* p, intptr_t* sp) {
Unimplemented();
return NULL;
return nullptr;
}
void print() const { print_on(tty); }
void print_on(outputStream* st) const { st->print_cr("Small register map"); }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,25 +46,25 @@ inline frame StackChunkFrameStream<frame_kind>::to_frame() const {
template <ChunkFrames frame_kind>
inline address StackChunkFrameStream<frame_kind>::get_pc() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::fp() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::derelativize(int offset) const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>
inline intptr_t* StackChunkFrameStream<frame_kind>::unextended_sp_for_interpreter_frame() const {
Unimplemented();
return NULL;
return nullptr;
}
template <ChunkFrames frame_kind>

@ -483,7 +483,7 @@ class StubGenerator: public StubCodeGenerator {
__ z_st(exception_line, thread_(exception_line));
// Complete return to VM.
assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
assert(StubRoutines::_call_stub_return_address != nullptr, "must have been generated before");
// Continue in call stub.
__ z_br(Z_ARG2);
@ -649,7 +649,7 @@ class StubGenerator: public StubCodeGenerator {
RuntimeStub::new_runtime_stub(name, &code,
frame_complete_pc - start,
framesize_in_bytes/wordSize,
NULL /*oop_maps*/, false);
nullptr /*oop_maps*/, false);
return stub->entry_point();
}
@ -685,12 +685,12 @@ class StubGenerator: public StubCodeGenerator {
const Register Rarray_ptr = Z_ARG5; // Current value from cache array.
if (UseCompressedOops) {
assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub");
}
// Always take the slow path.
__ check_klass_subtype_slow_path(Rsubklass, Rsuperklass,
Rarray_ptr, Rlength, NULL, &miss);
Rarray_ptr, Rlength, nullptr, &miss);
// Match falls through here.
__ clear_reg(Z_RET); // Zero indicates a match. Set EQ flag in CC.
@ -3171,7 +3171,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
} else {
// In PRODUCT builds, the function pointers will keep their initial (NULL) value.
// In PRODUCT builds, the function pointers will keep their initial (null) value.
// LibraryCallKit::try_to_inline() will then return false, preventing the intrinsic from being called.
assert(VM_Version::has_Crypto_AES(), "Inconsistent settings. Check vm_version_s390.cpp");
}
@ -3181,7 +3181,7 @@ class StubGenerator: public StubCodeGenerator {
if (VM_Version::has_Crypto_AES_CTR()) {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt");
} else {
// In PRODUCT builds, the function pointers will keep their initial (NULL) value.
// In PRODUCT builds, the function pointers will keep their initial (null) value.
// LibraryCallKit::try_to_inline() will then return false, preventing the intrinsic from being called.
assert(VM_Version::has_Crypto_AES_CTR(), "Inconsistent settings. Check vm_version_s390.cpp");
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -33,12 +33,12 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::zarch::_partial_subtype_check = NULL;
address StubRoutines::zarch::_partial_subtype_check = nullptr;
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by the trot instruction.
address StubRoutines::zarch::_trot_table_addr = NULL;
address StubRoutines::zarch::_trot_table_addr = nullptr;
address StubRoutines::zarch::_nmethod_entry_barrier = NULL;
address StubRoutines::zarch::_nmethod_entry_barrier = nullptr;
int StubRoutines::zarch::_atomic_memory_operation_lock = StubRoutines::zarch::unlocked;
@ -48,7 +48,7 @@ void StubRoutines::zarch::generate_load_absolute_address(MacroAssembler* masm, R
__ load_absolute_address(table, table_addr);
#ifdef ASSERT
assert(table_addr != NULL, "CRC lookup table address must be initialized by now");
assert(table_addr != nullptr, "CRC lookup table address must be initialized by now");
assert(*((uint32_t*)(table_addr+4)) == (uint32_t)table_contents, "Bad CRC lookup table: 0x%8.8x, expected 0x%8.8x", *((uint32_t*)(table_addr+4)), (uint32_t)table_contents);
{
Label L;
@ -90,7 +90,7 @@ void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Re
__ relocate(rspec);
__ load_absolute_address(table, _trot_table_addr);
#ifdef ASSERT
assert(_trot_table_addr != NULL, "Translate table address must be initialized by now");
assert(_trot_table_addr != nullptr, "Translate table address must be initialized by now");
assert((p2i(_trot_table_addr) & (TROT_ALIGNMENT-1)) == 0, "Translate table alignment error");
for (int i = 0; i < 256; i++) {
assert(i == *((jshort*)(_trot_table_addr+2*i)), "trot_table[%d] = %d", i, *((jshort*)(_trot_table_addr+2*i)));

@ -281,7 +281,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// don't dereference it as in the case of ints, floats, etc.
// UNBOX argument
// Load reference and check for NULL.
// Load reference and check for null.
Label do_int_Entry4Boxed;
__ bind(do_boxed);
{
@ -589,7 +589,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();
BLOCK_COMMENT("exception_handler_common {");
@ -597,7 +597,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
// Expression stack must be empty before entering the VM if an
// exception happened.
__ empty_expression_stack();
if (name != NULL) {
if (name != nullptr) {
__ load_absolute_address(Z_ARG2, (address)name);
} else {
__ clear_reg(Z_ARG2, true, false);
@ -608,7 +608,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception),
Z_ARG2, Z_tos /*object (see TT::aastore())*/);
} else {
if (message != NULL) {
if (message != nullptr) {
__ load_absolute_address(Z_ARG3, (address)message);
} else {
__ clear_reg(Z_ARG3, true, false);
@ -638,7 +638,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ resize_frame_absolute(sp_before_i2c_extension, Z_locals/*tmp*/, true/*load_fp*/);
// TODO(ZASM): necessary??
// // and NULL it as marker that esp is now tos until next java call
// // and null it as marker that esp is now tos until next java call
// __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
@ -683,7 +683,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
BLOCK_COMMENT("deopt_entry {");
// TODO(ZASM): necessary? NULL last_sp until next java call
// TODO(ZASM): necessary? null last_sp until next java call
// __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer.
__ restore_bcp();
@ -701,7 +701,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ should_not_reach_here();
__ bind(L);
}
if (continuation == NULL) {
if (continuation == nullptr) {
__ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation, Z_R1_scratch);
@ -780,8 +780,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch
// (NULL bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or NULL if the
// (null bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or null if the
// compilation did not complete (either went background or bailed
// out).
__ clear_reg(Z_ARG2);
@ -812,7 +812,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_
// Get the stack base, and in debug, verify it is non-zero.
__ z_lg(tmp1, thread_(stack_base));
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
NearLabel base_not_zero;
__ compareU64_and_branch(tmp1, (intptr_t)0L, Assembler::bcondNotEqual, base_not_zero);
reentry = __ stop_chain_static(reentry, "stack base is zero in generate_stack_overflow_check");
@ -850,7 +850,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_
// Note also that the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
__ load_absolute_address(tmp1, StubRoutines::throw_StackOverflowError_entry());
__ z_br(tmp1);
@ -875,7 +875,7 @@ void TemplateInterpreterGenerator::lock_method(void) {
__ get_method(method);
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{
Label L;
__ testbit(method2_(method, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
@ -909,7 +909,7 @@ void TemplateInterpreterGenerator::lock_method(void) {
{
NearLabel L;
__ compare64_and_branch(object, (intptr_t) 0, Assembler::bcondNotEqual, L);
reentry = __ stop_chain_static(reentry, "synchronization object is NULL");
reentry = __ stop_chain_static(reentry, "synchronization object is null");
__ bind(L);
}
#endif // ASSERT
@ -1132,12 +1132,12 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// z_ijava_state->locals = Z_esp + parameter_count bytes
__ z_stg(Z_locals, _z_ijava_state_neg(locals), fp);
// z_ijava_state->oop_temp = NULL;
// z_ijava_state->oop_temp = nullptr;
__ store_const(Address(fp, oop_tmp_offset), 0);
// Initialize z_ijava_state->mdx.
Register Rmdp = Z_bcp;
// native_call: assert that mdo == NULL
// native_call: assert that mdo is null
const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call);
if (ProfileInterpreter && check_for_mdo) {
Label get_continue;
@ -1207,7 +1207,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// Decide what to do: Use same platform specific instructions and runtime calls as compilers.
bool use_instruction = false;
address runtime_entry = NULL;
address runtime_entry = nullptr;
int num_args = 1;
bool double_precision = true;
@ -1236,7 +1236,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
}
// Use normal entry if neither instruction nor runtime call is used.
if (!use_instruction && runtime_entry == NULL) return NULL;
if (!use_instruction && runtime_entry == nullptr) return nullptr;
address entry = __ pc();
@ -1343,7 +1343,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Make sure method is native and not abstract.
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{ Label L;
__ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT);
__ z_btrue(L);
@ -1711,7 +1711,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
// Make sure method is not native and not abstract.
// Rethink these assertions - they can be simplified and shared.
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{ Label L;
__ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT);
__ z_bfalse(L);
@ -2165,7 +2165,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// The member name argument must be restored if _invokestatic is
// re-executed after a PopFrame call. Detect such a case in the
// InterpreterRuntime function and return the member name
// argument, or NULL.
// argument, or null.
__ z_lg(Z_ARG2, Address(Z_locals));
__ get_method(Z_ARG3);
__ call_VM(Z_tmp_1,
@ -2378,7 +2378,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The runtime saves the right registers, depending on
// the tosca in-state for the given template.
address entry = Interpreter::trace_code(t->tos_in());
guarantee(entry != NULL, "entry must have been generated");
guarantee(entry != nullptr, "entry must have been generated");
__ call_stub(entry);
}

@ -138,7 +138,7 @@ static inline Address aaddress(int n) {
return iaddress(n);
}
// Pass NULL, if no shift instruction should be emitted.
// Pass null, if no shift instruction should be emitted.
static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
if (masm) {
masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
@ -146,7 +146,7 @@ static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0));
}
// Pass NULL, if no shift instruction should be emitted.
// Pass null, if no shift instruction should be emitted.
static inline Address laddress(InterpreterMacroAssembler *masm, Register r) {
if (masm) {
masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
@ -464,7 +464,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ z_ltgr(Z_tos, Z_tos);
__ z_bre(L_do_resolve);
// Convert null sentinel to NULL.
// Convert null sentinel to null.
__ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr());
__ resolve_oop_handle(Z_R1_scratch);
__ z_cg(Z_tos, Address(Z_R1_scratch));
@ -1167,7 +1167,7 @@ void TemplateTable::aastore() {
// Address where the store goes, i.e. &(Rarray[index])
__ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
// do array store check - check for NULL value first.
// do array store check - check for null value first.
__ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null);
Register Rsub_klass = Z_ARG4;
@ -1191,11 +1191,11 @@ void TemplateTable::aastore() {
Register tmp3 = Rsub_klass;
// Have a NULL in Rvalue.
// Have a null in Rvalue.
__ bind(is_null);
__ profile_null_seen(tmp1);
// Store a NULL.
// Store a null.
do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), noreg,
tmp3, tmp2, tmp1, IS_ARRAY);
__ z_bru(done);
@ -1937,7 +1937,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
const Address mask(mdo, MethodData::backedge_mask_offset());
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
Z_ARG2, false, Assembler::bcondZero,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ z_bru(dispatch);
__ bind(no_mdo);
}
@ -1948,7 +1948,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ increment_mask_and_jump(Address(m_counters, be_offset),
increment, mask,
Z_ARG2, false, Assembler::bcondZero,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ bind(dispatch);
}
@ -1972,7 +1972,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
Z_ARG2);
// Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
// Z_RET: osr nmethod (osr ok) or null (osr not possible).
__ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
// Nmethod may have been invalidated (VM may block upon call_VM return).
@ -2385,7 +2385,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ load_resolved_method_at_index(byte_no, cache, cpe_offset, method);
__ load_method_holder(klass, method);
__ clinit_barrier(klass, Z_thread, NULL /*L_fast_path*/, &clinit_barrier_slow);
__ clinit_barrier(klass, Z_thread, nullptr /*L_fast_path*/, &clinit_barrier_slow);
}
BLOCK_COMMENT("} resolve_cache_and_index");
@ -2547,12 +2547,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
Z_ARG3, Z_R1_scratch);
if (is_static) {
__ clear_reg(Z_ARG2, true, false); // NULL object reference. Don't set CC.
__ clear_reg(Z_ARG2, true, false); // null object reference. Don't set CC.
} else {
__ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it.
__ verify_oop(Z_ARG2);
}
// Z_ARG2: object pointer or NULL
// Z_ARG2: object pointer or null
// Z_ARG3: cache entry pointer
__ call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
@ -2864,7 +2864,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache,
// object(tos)
__ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
// Z_ARG2: object pointer set up above (NULL if static)
// Z_ARG2: object pointer set up above (null if static)
// Z_ARG3: cache entry pointer
// Z_ARG4: jvalue object on the stack
__ call_VM(noreg,
@ -3786,7 +3786,7 @@ void TemplateTable::invokedynamic(int byte_no) {
// SPECjbb2005 shows no measurable performance degradation.
void TemplateTable::_new() {
transition(vtos, atos);
address prev_instr_address = NULL;
address prev_instr_address = nullptr;
Register tags = Z_tmp_1;
Register RallocatedObject = Z_tos;
Register cpool = Z_ARG2;
@ -3930,7 +3930,7 @@ void TemplateTable::checkcast() {
NearLabel done, is_null, ok_is_subtype, quicked, resolved;
BLOCK_COMMENT("checkcast {");
// If object is NULL, we are almost done.
// If object is null, we are almost done.
__ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
// Get cpool & tags index.
@ -3984,7 +3984,7 @@ void TemplateTable::checkcast() {
__ z_lgr(Z_tos, receiver); // Restore object.
// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ z_bru(done);
__ bind(is_null);
@ -4003,7 +4003,7 @@ void TemplateTable::instanceof() {
NearLabel done, is_null, ok_is_subtype, quicked, resolved;
BLOCK_COMMENT("instanceof {");
// If object is NULL, we are almost done.
// If object is null, we are almost done.
__ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
// Get cpool & tags index.
@ -4054,7 +4054,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype);
__ load_const_optimized(Z_tos, 1);
// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ z_bru(done);
__ bind(is_null);
@ -4064,8 +4064,8 @@ void TemplateTable::instanceof() {
}
__ bind(done);
// tos = 0: obj == NULL or obj is not an instanceof the specified klass
// tos = 1: obj != NULL and obj is an instanceof the specified klass
// tos = 0: obj is null or obj is not an instanceof the specified klass
// tos = 1: obj isn't null and obj is an instanceof the specified klass
BLOCK_COMMENT("} instanceof");
}
@ -4141,13 +4141,13 @@ void TemplateTable::monitorenter() {
BLOCK_COMMENT("monitorenter {");
// Check for NULL object.
// Check for null object.
__ null_check(Z_tos);
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
NearLabel allocated;
// Initialize entry pointer.
const Register Rfree_slot = Z_tmp_1;
__ clear_reg(Rfree_slot, true, false); // Points to free slot or NULL. Don't set CC.
__ clear_reg(Rfree_slot, true, false); // Points to free slot or null. Don't set CC.
// Find a free slot in the monitor block from top to bot (result in Rfree_slot).
{
@ -4160,7 +4160,7 @@ void TemplateTable::monitorenter() {
__ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{ NearLabel ok;
__ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
@ -4193,7 +4193,7 @@ void TemplateTable::monitorenter() {
__ bind(exit);
}
// Rfree_slot != NULL -> found one
// Rfree_slot isn't null -> found one
__ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated);
// Allocate one if there's no free slot.
@ -4230,7 +4230,7 @@ void TemplateTable::monitorexit() {
BLOCK_COMMENT("monitorexit {");
// Check for NULL object.
// Check for null object.
__ null_check(Z_tos);
NearLabel found, not_found;
@ -4248,7 +4248,7 @@ void TemplateTable::monitorexit() {
__ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
#ifdef ASSERT
address reentry = NULL;
address reentry = nullptr;
{ NearLabel ok;
__ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");

@ -717,7 +717,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
// - LPAR
// - whole "Box" (CPUs)
// - z/VM / KVM (VM<nn>); this is not available in an LPAR-only setup
const char* kw[] = { "LPAR", "CPUs", "VM", NULL };
const char* kw[] = { "LPAR", "CPUs", "VM", nullptr };
const char* info_file = "/proc/sysinfo";
if (!print_matching_lines_from_file(info_file, st, kw)) {
@ -842,7 +842,7 @@ void VM_Version::set_features_from(const char* march) {
bool err = false;
bool prt = false;
if ((march != NULL) && (march[0] != '\0')) {
if ((march != nullptr) && (march[0] != '\0')) {
const int buf_len = 16;
const int hdr_len = 5;
char buf[buf_len];
@ -909,10 +909,10 @@ void VM_Version::set_features_from(const char* march) {
// < 0: failure: required number of feature bit string words (buffer too small).
// == 0: failure: operation aborted.
//
static long (*getFeatures)(unsigned long*, int, int) = NULL;
static long (*getFeatures)(unsigned long*, int, int) = nullptr;
void VM_Version::set_getFeatures(address entryPoint) {
if (getFeatures == NULL) {
if (getFeatures == nullptr) {
getFeatures = (long(*)(unsigned long*, int, int))entryPoint;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -49,9 +49,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
@ -82,7 +82,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");
const Register rcvr_klass = Z_R1_scratch;
address npe_addr = __ pc(); // npe == NULL ptr exception
address npe_addr = __ pc(); // npe is short for null pointer exception
// Get receiver klass.
__ load_klass(rcvr_klass, Z_ARG1);
@ -152,9 +152,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
@ -193,7 +193,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Get receiver klass.
// Must do an explicit check if offset too large or implicit checks are disabled.
address npe_addr = __ pc(); // npe == NULL ptr exception
address npe_addr = __ pc(); // npe is short for null pointer exception
__ load_klass(rcvr_klass, Z_ARG1);
// Receiver subtype check against REFC.