8301493: Replace NULL with nullptr in cpu/aarch64
Reviewed-by: tschatzl, gziemski, dholmes
parent f94f957734
commit 948f3b3c24
Changed files under src/hotspot/cpu/aarch64:
  assembler_aarch64.cpp
  c1_CodeStubs_aarch64.cpp
  c1_LIRAssembler_aarch64.cpp
  c1_LIRAssembler_aarch64.hpp
  c1_LIRGenerator_aarch64.cpp
  c1_MacroAssembler_aarch64.cpp
  c1_MacroAssembler_aarch64.hpp
  c1_Runtime1_aarch64.cpp
  c2_CodeStubs_aarch64.cpp
  c2_MacroAssembler_aarch64.cpp
  codeBuffer_aarch64.cpp
  compiledIC_aarch64.cpp
  continuationFreezeThaw_aarch64.inline.hpp
  disassembler_aarch64.hpp
  downcallLinker_aarch64.cpp
  frame_aarch64.cpp
  frame_aarch64.inline.hpp
  globals_aarch64.hpp
  icBuffer_aarch64.cpp
  icache_aarch64.cpp
  interp_masm_aarch64.cpp
  interp_masm_aarch64.hpp
  interpreterRT_aarch64.cpp
  javaFrameAnchor_aarch64.hpp
  jniFastGetField_aarch64.cpp
  macroAssembler_aarch64.cpp
  macroAssembler_aarch64.hpp
  matcher_aarch64.hpp
  methodHandles_aarch64.cpp
  nativeInst_aarch64.cpp
  nativeInst_aarch64.hpp
  registerMap_aarch64.cpp
  registerMap_aarch64.hpp
  relocInfo_aarch64.cpp
  sharedRuntime_aarch64.cpp
  stubGenerator_aarch64.cpp
  stubRoutines_aarch64.cpp
  templateInterpreterGenerator_aarch64.cpp
  templateTable_aarch64.cpp
  vm_version_aarch64.cpp
  vtableStubs_aarch64.cpp
  gc/g1
  gc/shared
  gc/shenandoah
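The change itself is mechanical: occurrences of the NULL macro in the aarch64-specific HotSpot sources become the C++ nullptr keyword, and comments or message strings that said "NULL" now say "null" where they mean a null pointer or Java null. As a minimal standalone sketch of why nullptr is preferred (not part of this patch; the describe() helpers are made up purely for illustration), nullptr has its own type, std::nullptr_t, so it can never be mistaken for an integer during overload resolution the way the macro can:

```cpp
#include <cstddef>
#include <iostream>

// Hypothetical overload pair, not from HotSpot; it only illustrates the
// difference between the NULL macro and the nullptr keyword.
static void describe(int)         { std::cout << "int overload\n"; }
static void describe(const char*) { std::cout << "pointer overload\n"; }

int main() {
  // describe(NULL);   // NULL expands to 0 or 0L, so this either silently
  //                   // picks the int overload or is ambiguous, per platform.
  describe(nullptr);   // nullptr has type std::nullptr_t: always the pointer overload.
  const char* p = nullptr;  // the same null pointer value, spelled explicitly
  describe(p);
  return 0;
}
```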
@@ -102,7 +102,7 @@ void Assembler::emit_data64(jlong data,
                              RelocationHolder const& rspec,
                              int format) {

-  assert(inst_mark() != NULL, "must be inside InstructionMark");
+  assert(inst_mark() != nullptr, "must be inside InstructionMark");
   // Do not use AbstractAssembler::relocate, which is not intended for
   // embedded words.  Instead, relocate to the enclosing instruction.
   code_section()->relocate(inst_mark(), rspec, format);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,7 +44,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
   __ adr(rscratch1, safepoint_pc);
   __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

-  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
          "polling page return stub not created yet");
   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

@@ -334,7 +334,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                   relocInfo::static_call_type);
   address call = __ trampoline_call(resolve);
-  if (call == NULL) {
+  if (call == nullptr) {
     ce->bailout("trampoline stub overflow");
     return;
   }
@@ -111,7 +111,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() {

 address LIR_Assembler::float_constant(float f) {
   address const_addr = __ float_constant(f);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -122,7 +122,7 @@ address LIR_Assembler::float_constant(float f) {

 address LIR_Assembler::double_constant(double d) {
   address const_addr = __ double_constant(d);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -132,7 +132,7 @@ address LIR_Assembler::double_constant(double d) {

 address LIR_Assembler::int_constant(jlong n) {
   address const_addr = __ long_constant(n);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -278,7 +278,7 @@ void LIR_Assembler::osr_entry() {
         Label L;
         __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
         __ cbnz(rscratch1, L);
-        __ stop("locked object is NULL");
+        __ stop("locked object is null");
         __ bind(L);
       }
 #endif
@@ -328,7 +328,7 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
 }

 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
-  if (o == NULL) {
+  if (o == nullptr) {
     __ mov(reg, zr);
   } else {
     __ movoop(reg, o);
@@ -336,7 +336,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 }

 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
-  address target = NULL;
+  address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;

   switch (patching_id(info)) {
@@ -379,7 +379,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
 int LIR_Assembler::emit_exception_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("exception handler overflow");
     return -1;
@@ -427,7 +427,7 @@ int LIR_Assembler::emit_unwind_handler() {
   }

   // Perform needed unlocking
-  MonitorExitStub* stub = NULL;
+  MonitorExitStub* stub = nullptr;
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::r0_opr);
     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
@@ -455,7 +455,7 @@ int LIR_Assembler::emit_unwind_handler() {
   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

   // Emit the slow path assembly
-  if (stub != NULL) {
+  if (stub != nullptr) {
     stub->emit_code(this);
   }

@@ -466,7 +466,7 @@ int LIR_Assembler::emit_unwind_handler() {
 int LIR_Assembler::emit_deopt_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(deopt_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
     return -1;
@@ -487,7 +487,7 @@ void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
   int pc_offset = code_offset();
   flush_debug_info(pc_offset);
   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
-  if (info->exception_handlers() != NULL) {
+  if (info->exception_handlers() != nullptr) {
     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
   }
 }
@@ -509,7 +509,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 }

 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  guarantee(info != NULL, "Shouldn't be NULL");
+  guarantee(info != nullptr, "Shouldn't be null");
   __ get_polling_page(rscratch1, relocInfo::poll_type);
   add_debug_info_for_branch(info);  // This isn't just debug info:
                                     // it's the oop map
@@ -604,14 +604,14 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
         if (! c->as_jobject())
           __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
         else {
-          const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
+          const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
           reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
         }
       }
       break;
     case T_ADDRESS:
       {
-        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
+        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr);
         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
       }
     case T_INT:
@@ -775,7 +775,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po

 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
   LIR_Address* to_addr = dest->as_address_ptr();
-  PatchingStub* patch = NULL;
+  PatchingStub* patch = nullptr;
   Register compressed_src = rscratch1;

   if (patch_code != lir_patch_none) {
@@ -847,7 +847,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     default:
       ShouldNotReachHere();
   }
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check(null_check_here, info);
   }
 }
@@ -890,7 +890,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {


 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
-  address target = NULL;
+  address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;

   switch (patching_id(info)) {
@@ -943,7 +943,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     return;
   }

-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }
   int null_check_here = code_offset();
@@ -1053,13 +1053,13 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {

 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 #ifdef ASSERT
-  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
-  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
-  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
+  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
+  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
+  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
 #endif

   if (op->cond() == lir_cond_always) {
-    if (op->info() != NULL) add_debug_info_for_branch(op->info());
+    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
     __ b(*(op->label()));
   } else {
     Assembler::Condition acond;
@@ -1288,12 +1288,12 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L

   if (should_profile) {
     ciMethod* method = op->profiled_method();
-    assert(method != NULL, "Should have method");
+    assert(method != nullptr, "Should have method");
     int bci = op->profiled_bci();
     md = method->method_data_or_null();
-    assert(md != NULL, "Sanity");
+    assert(md != nullptr, "Sanity");
     data = md->bci_to_data(bci);
-    assert(data != NULL, "need data for type check");
+    assert(data != nullptr, "need data for type check");
     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
   }
   Label profile_cast_success, profile_cast_failure;
@@ -1375,7 +1375,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     }
   } else {
     // perform the fast part of the checking logic
-    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
     __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
@@ -1428,12 +1428,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {

   if (should_profile) {
     ciMethod* method = op->profiled_method();
-    assert(method != NULL, "Should have method");
+    assert(method != nullptr, "Should have method");
     int bci = op->profiled_bci();
     md = method->method_data_or_null();
-    assert(md != NULL, "Sanity");
+    assert(md != nullptr, "Sanity");
     data = md->bci_to_data(bci);
-    assert(data != NULL, "need data for type check");
+    assert(data != nullptr, "need data for type check");
     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
   }
   Label profile_cast_success, profile_cast_failure, done;
@@ -1466,7 +1466,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       // get instance klass (it's already uncompressed)
       __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
       // perform the fast part of the checking logic
-      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
       __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
@@ -1620,7 +1620,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
   } else if (opr1->is_constant()) {
     LIR_Opr tmp
       = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
-    const2reg(opr1, tmp, lir_patch_none, NULL);
+    const2reg(opr1, tmp, lir_patch_none, nullptr);
     opr1 = tmp;
   }

@@ -1630,7 +1630,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
   } else if (opr2->is_constant()) {
     LIR_Opr tmp
       = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
-    const2reg(opr2, tmp, lir_patch_none, NULL);
+    const2reg(opr2, tmp, lir_patch_none, nullptr);
     opr2 = tmp;
   }

@@ -1641,7 +1641,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
 }

 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
-  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
+  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

   if (left->is_single_cpu()) {
     Register lreg = left->as_register();
@@ -2033,7 +2033,7 @@ void LIR_Assembler::align_call(LIR_Code code) { }

 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   address call = __ trampoline_call(Address(op->addr(), rtype));
-  if (call == NULL) {
+  if (call == nullptr) {
     bailout("trampoline stub overflow");
     return;
   }
@@ -2044,7 +2044,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {

 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   address call = __ ic_call(op->addr());
-  if (call == NULL) {
+  if (call == nullptr) {
     bailout("trampoline stub overflow");
     return;
   }
@@ -2055,7 +2055,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size());
-  if (stub == NULL) {
+  if (stub == nullptr) {
     bailout("static call stub overflow");
     return;
   }
@ -2226,11 +2226,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
CodeStub* stub = op->stub();
|
||||
int flags = op->flags();
|
||||
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
|
||||
BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
|
||||
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
|
||||
|
||||
// if we don't know anything, just go through the generic arraycopy
|
||||
if (default_type == NULL // || basic_type == T_OBJECT
|
||||
if (default_type == nullptr // || basic_type == T_OBJECT
|
||||
) {
|
||||
Label done;
|
||||
assert(src == r1 && src_pos == r2, "mismatch in calling convention");
|
||||
@ -2242,7 +2242,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ str(src, Address(sp, 4*BytesPerWord));
|
||||
|
||||
address copyfunc_addr = StubRoutines::generic_arraycopy();
|
||||
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
|
||||
assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
|
||||
|
||||
// The arguments are in java calling convention so we shift them
|
||||
// to C convention
|
||||
@ -2282,7 +2282,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
|
||||
assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
|
||||
|
||||
int elem_size = type2aelembytes(basic_type);
|
||||
int scale = exact_log2(elem_size);
|
||||
@ -2292,7 +2292,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
|
||||
Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
|
||||
|
||||
// test for NULL
|
||||
// test for null
|
||||
if (flags & LIR_OpArrayCopy::src_null_check) {
|
||||
__ cbz(src, *stub->entry());
|
||||
}
|
||||
@ -2377,7 +2377,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ load_klass(src, src);
|
||||
__ load_klass(dst, dst);
|
||||
|
||||
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
|
||||
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
|
||||
|
||||
__ PUSH(src, dst);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
@ -2389,7 +2389,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ POP(src, dst);
|
||||
|
||||
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
|
||||
if (copyfunc_addr != NULL) { // use stub if available
|
||||
if (copyfunc_addr != nullptr) { // use stub if available
|
||||
// src is not a sub class of dst so we have to do a
|
||||
// per-element check.
|
||||
|
||||
@ -2559,7 +2559,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
||||
Register hdr = op->hdr_opr()->as_register();
|
||||
Register lock = op->lock_opr()->as_register();
|
||||
if (UseHeavyMonitors) {
|
||||
if (op->info() != NULL) {
|
||||
if (op->info() != nullptr) {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
__ null_check(obj, -1);
|
||||
}
|
||||
@ -2568,7 +2568,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// add debug info for NullPointerException only if one is possible
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
if (op->info() != NULL) {
|
||||
if (op->info() != nullptr) {
|
||||
add_debug_info_for_null_check(null_check_offset, op->info());
|
||||
}
|
||||
// done
|
||||
@ -2586,7 +2586,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
|
||||
Register result = op->result_opr()->as_pointer_register();
|
||||
|
||||
CodeEmitInfo* info = op->info();
|
||||
if (info != NULL) {
|
||||
if (info != nullptr) {
|
||||
add_debug_info_for_null_check_here(info);
|
||||
}
|
||||
|
||||
@ -2605,9 +2605,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
|
||||
// Update counter for all call types
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
assert(md != nullptr, "Sanity");
|
||||
ciProfileData* data = md->bci_to_data(bci);
|
||||
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
|
||||
assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
|
||||
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
|
||||
Register mdo = op->mdo()->as_register();
|
||||
__ mov_metadata(mdo, md->constant_encoding());
|
||||
@ -2620,7 +2620,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
assert_different_registers(mdo, recv);
|
||||
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
|
||||
ciKlass* known_klass = op->known_holder();
|
||||
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
|
||||
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
|
||||
// We know the type that will be seen at this call site; we can
|
||||
// statically update the MethodData* rather than needing to do
|
||||
// dynamic tests on the receiver type
|
||||
@ -2645,7 +2645,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
// VirtualCallData rather than just the first time
|
||||
for (i = 0; i < VirtualCallData::row_limit(); i++) {
|
||||
ciKlass* receiver = vc_data->receiver(i);
|
||||
if (receiver == NULL) {
|
||||
if (receiver == nullptr) {
|
||||
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
|
||||
__ mov_metadata(rscratch1, known_klass->constant_encoding());
|
||||
__ lea(rscratch2, recv_addr);
|
||||
@ -2712,7 +2712,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
Label update, next, none;
|
||||
|
||||
bool do_null = !not_null;
|
||||
bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
|
||||
bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
|
||||
bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
|
||||
|
||||
assert(do_null || do_update, "why are we here?");
|
||||
@ -2748,7 +2748,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
if (do_update) {
|
||||
#ifdef ASSERT
|
||||
if (exact_klass != NULL) {
|
||||
if (exact_klass != nullptr) {
|
||||
Label ok;
|
||||
__ load_klass(tmp, tmp);
|
||||
__ mov_metadata(rscratch1, exact_klass->constant_encoding());
|
||||
@ -2759,8 +2759,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
}
|
||||
#endif
|
||||
if (!no_conflict) {
|
||||
if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
|
||||
if (exact_klass != NULL) {
|
||||
if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
|
||||
if (exact_klass != nullptr) {
|
||||
__ mov_metadata(tmp, exact_klass->constant_encoding());
|
||||
} else {
|
||||
__ load_klass(tmp, tmp);
|
||||
@ -2789,7 +2789,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
__ cbz(rscratch1, next);
|
||||
}
|
||||
} else {
|
||||
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
|
||||
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
|
||||
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
|
||||
|
||||
__ ldr(tmp, mdo_addr);
|
||||
@ -2810,7 +2810,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
}
|
||||
} else {
|
||||
// There's a single possible klass at this profile point
|
||||
assert(exact_klass != NULL, "should be");
|
||||
assert(exact_klass != nullptr, "should be");
|
||||
if (TypeEntries::is_type_none(current_klass)) {
|
||||
__ mov_metadata(tmp, exact_klass->constant_encoding());
|
||||
__ ldr(rscratch2, mdo_addr);
|
||||
@ -2839,7 +2839,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
// first time here. Set profile type.
|
||||
__ str(tmp, mdo_addr);
|
||||
} else {
|
||||
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
|
||||
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
|
||||
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
|
||||
|
||||
__ ldr(tmp, mdo_addr);
|
||||
@ -2903,7 +2903,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
|
||||
__ blr(rscratch1);
|
||||
}
|
||||
|
||||
if (info != NULL) {
|
||||
if (info != nullptr) {
|
||||
add_call_info_here(info);
|
||||
}
|
||||
__ post_call_nop();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -60,7 +60,7 @@ friend class ArrayCopyStub;
|
||||
void casw(Register addr, Register newval, Register cmpval);
|
||||
void casl(Register addr, Register newval, Register cmpval);
|
||||
|
||||
void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL);
|
||||
void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = nullptr);
|
||||
|
||||
static const int max_tableswitches = 20;
|
||||
struct tableswitch switches[max_tableswitches];
|
||||
|
@ -107,11 +107,11 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
|
||||
|
||||
|
||||
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
|
||||
if (v->type()->as_IntConstant() != NULL) {
|
||||
if (v->type()->as_IntConstant() != nullptr) {
|
||||
return v->type()->as_IntConstant()->value() == 0L;
|
||||
} else if (v->type()->as_LongConstant() != NULL) {
|
||||
} else if (v->type()->as_LongConstant() != nullptr) {
|
||||
return v->type()->as_LongConstant()->value() == 0L;
|
||||
} else if (v->type()->as_ObjectConstant() != NULL) {
|
||||
} else if (v->type()->as_ObjectConstant() != nullptr) {
|
||||
return v->type()->as_ObjectConstant()->value()->is_null_object();
|
||||
} else {
|
||||
return false;
|
||||
@ -120,11 +120,11 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
|
||||
|
||||
bool LIRGenerator::can_inline_as_constant(Value v) const {
|
||||
// FIXME: Just a guess
|
||||
if (v->type()->as_IntConstant() != NULL) {
|
||||
if (v->type()->as_IntConstant() != nullptr) {
|
||||
return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
|
||||
} else if (v->type()->as_LongConstant() != NULL) {
|
||||
} else if (v->type()->as_LongConstant() != nullptr) {
|
||||
return v->type()->as_LongConstant()->value() == 0L;
|
||||
} else if (v->type()->as_ObjectConstant() != NULL) {
|
||||
} else if (v->type()->as_ObjectConstant() != nullptr) {
|
||||
return v->type()->as_ObjectConstant()->value()->is_null_object();
|
||||
} else {
|
||||
return false;
|
||||
@ -315,7 +315,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
|
||||
// "lock" stores the address of the monitor stack slot, so this is not an oop
|
||||
LIR_Opr lock = new_register(T_INT);
|
||||
|
||||
CodeEmitInfo* info_for_exception = NULL;
|
||||
CodeEmitInfo* info_for_exception = nullptr;
|
||||
if (x->needs_null_check()) {
|
||||
info_for_exception = state_for(x);
|
||||
}
|
||||
@ -466,7 +466,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
|
||||
}
|
||||
}
|
||||
rlock_result(x);
|
||||
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
|
||||
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -512,9 +512,9 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
|
||||
LIR_Opr ill = LIR_OprFact::illegalOpr;
|
||||
if (x->op() == Bytecodes::_irem) {
|
||||
__ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
|
||||
__ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
|
||||
} else if (x->op() == Bytecodes::_idiv) {
|
||||
__ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
|
||||
__ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
|
||||
}
|
||||
|
||||
} else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
|
||||
@ -547,7 +547,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
|
||||
// when an operand with use count 1 is the left operand, then it is
|
||||
// likely that no move for 2-operand-LIR-form is necessary
|
||||
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
|
||||
if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
|
||||
x->swap_operands();
|
||||
}
|
||||
|
||||
@ -800,7 +800,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
|
||||
LIR_Opr calc_result = rlock_result(x);
|
||||
LIR_Opr result_reg = result_register_for(x->type());
|
||||
|
||||
CallingConvention* cc = NULL;
|
||||
CallingConvention* cc = nullptr;
|
||||
|
||||
if (x->id() == vmIntrinsics::_dpow) {
|
||||
LIRItem value1(x->argument_at(1), this);
|
||||
@ -822,49 +822,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
|
||||
|
||||
switch (x->id()) {
|
||||
case vmIntrinsics::_dexp:
|
||||
if (StubRoutines::dexp() != NULL) {
|
||||
if (StubRoutines::dexp() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dlog:
|
||||
if (StubRoutines::dlog() != NULL) {
|
||||
if (StubRoutines::dlog() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dlog10:
|
||||
if (StubRoutines::dlog10() != NULL) {
|
||||
if (StubRoutines::dlog10() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dpow:
|
||||
if (StubRoutines::dpow() != NULL) {
|
||||
if (StubRoutines::dpow() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dsin:
|
||||
if (StubRoutines::dsin() != NULL) {
|
||||
if (StubRoutines::dsin() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dcos:
|
||||
if (StubRoutines::dcos() != NULL) {
|
||||
if (StubRoutines::dcos() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
|
||||
}
|
||||
break;
|
||||
case vmIntrinsics::_dtan:
|
||||
if (StubRoutines::dtan() != NULL) {
|
||||
if (StubRoutines::dtan() != nullptr) {
|
||||
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
|
||||
} else {
|
||||
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
|
||||
@ -1161,7 +1161,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
|
||||
LIRItem length(x->length(), this);
|
||||
// in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
|
||||
// and therefore provide the state before the parameters have been consumed
|
||||
CodeEmitInfo* patching_info = NULL;
|
||||
CodeEmitInfo* patching_info = nullptr;
|
||||
if (!x->klass()->is_loaded() || PatchALot) {
|
||||
patching_info = state_for(x, x->state_before());
|
||||
}
|
||||
@ -1194,14 +1194,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
|
||||
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
|
||||
Values* dims = x->dims();
|
||||
int i = dims->length();
|
||||
LIRItemList* items = new LIRItemList(i, i, NULL);
|
||||
LIRItemList* items = new LIRItemList(i, i, nullptr);
|
||||
while (i-- > 0) {
|
||||
LIRItem* size = new LIRItem(dims->at(i), this);
|
||||
items->at_put(i, size);
|
||||
}
|
||||
|
||||
// Evaluate state_for early since it may emit code.
|
||||
CodeEmitInfo* patching_info = NULL;
|
||||
CodeEmitInfo* patching_info = nullptr;
|
||||
if (!x->klass()->is_loaded() || PatchALot) {
|
||||
patching_info = state_for(x, x->state_before());
|
||||
|
||||
@ -1248,7 +1248,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
|
||||
void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
LIRItem obj(x->obj(), this);
|
||||
|
||||
CodeEmitInfo* patching_info = NULL;
|
||||
CodeEmitInfo* patching_info = nullptr;
|
||||
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
|
||||
// must do this before locking the destination register as an oop register,
|
||||
// and before the obj is loaded (the latter is for deoptimization)
|
||||
@ -1263,10 +1263,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
|
||||
CodeStub* stub;
|
||||
if (x->is_incompatible_class_change_check()) {
|
||||
assert(patching_info == NULL, "can't patch this");
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
|
||||
} else if (x->is_invokespecial_receiver_check()) {
|
||||
assert(patching_info == NULL, "can't patch this");
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new DeoptimizeStub(info_for_exception,
|
||||
Deoptimization::Reason_class_check,
|
||||
Deoptimization::Action_none);
|
||||
@ -1289,7 +1289,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
|
||||
|
||||
// result and test object may not be in same register
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
CodeEmitInfo* patching_info = NULL;
|
||||
CodeEmitInfo* patching_info = nullptr;
|
||||
if ((!x->klass()->is_loaded() || PatchALot)) {
|
||||
// must do this before locking the destination register as an oop register
|
||||
patching_info = state_for(x, x->state_before());
|
||||
|
@ -91,7 +91,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
// displaced header address in the object header - if it is not the same, get the
|
||||
// object header instead
|
||||
lea(rscratch2, Address(obj, hdr_offset));
|
||||
cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/NULL);
|
||||
cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/nullptr);
|
||||
// if the object header was the same, we're done
|
||||
// if the object header was not the same, it is now in the hdr register
|
||||
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
|
||||
@ -110,7 +110,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
sub(hdr, hdr, rscratch1);
|
||||
ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
|
||||
// for recursive locking, the result is zero => save it in the displaced header
|
||||
// location (NULL in the displaced hdr location indicates recursive locking)
|
||||
// location (null in the displaced hdr location indicates recursive locking)
|
||||
str(hdr, Address(disp_hdr, 0));
|
||||
// otherwise we don't care about the result and handle locking via runtime call
|
||||
cbnz(hdr, slow_case);
|
||||
@ -129,7 +129,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
|
||||
|
||||
// load displaced header
|
||||
ldr(hdr, Address(disp_hdr, 0));
|
||||
// if the loaded hdr is NULL we had recursive locking
|
||||
// if the loaded hdr is null we had recursive locking
|
||||
// if we had recursive locking, we are done
|
||||
cbz(hdr, done);
|
||||
// load object
|
||||
@ -294,7 +294,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
|
||||
|
||||
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
|
||||
verify_oop(receiver);
|
||||
// explicit NULL check not needed since load from [klass_offset] causes a trap
|
||||
// explicit null check not needed since load from [klass_offset] causes a trap
|
||||
// check against inline cache
|
||||
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
|
||||
|
||||
@ -311,7 +311,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
|
||||
|
||||
// Insert nmethod entry barrier into frame.
|
||||
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
|
||||
bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
|
||||
}
|
||||
|
||||
void C1_MacroAssembler::remove_frame(int framesize) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -106,7 +106,7 @@ using MacroAssembler::null_check;
|
||||
void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN;
|
||||
|
||||
// This platform only uses signal-based null checks. The Label is not needed.
|
||||
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
|
||||
void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
|
||||
|
||||
void load_parameter(int offset_in_words, Register reg);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -369,7 +369,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
|
||||
|
||||
// Save registers, if required.
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
OopMap* oop_map = NULL;
|
||||
OopMap* oop_map = nullptr;
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
// We're handling an exception in the context of a compiled frame.
|
||||
@ -542,7 +542,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
// Note: This number affects also the RT-Call in generate_handle_exception because
|
||||
// the oop-map is shared for all calls.
|
||||
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
|
||||
assert(deopt_blob != NULL, "deoptimization blob must have been created");
|
||||
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
|
||||
|
||||
OopMap* oop_map = save_live_registers(sasm);
|
||||
|
||||
@ -616,8 +616,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
bool save_fpu_registers = true;
|
||||
|
||||
// stub code & info for the different stubs
|
||||
OopMapSet* oop_maps = NULL;
|
||||
OopMap* oop_map = NULL;
|
||||
OopMapSet* oop_maps = nullptr;
|
||||
OopMap* oop_map = nullptr;
|
||||
switch (id) {
|
||||
{
|
||||
case forward_exception_id:
|
||||
@ -834,7 +834,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
__ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));
|
||||
|
||||
Label miss;
|
||||
__ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);
|
||||
__ check_klass_subtype_slow_path(r4, r0, r2, r5, nullptr, &miss);
|
||||
|
||||
// fallthrough on success:
|
||||
__ mov(rscratch1, 1);
|
||||
@ -904,7 +904,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
oop_maps->add_gc_map(call_offset, oop_map);
|
||||
restore_live_registers(sasm);
|
||||
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
|
||||
assert(deopt_blob != NULL, "deoptimization blob must have been created");
|
||||
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
|
||||
__ leave();
|
||||
__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
|
||||
}
|
||||
@ -991,7 +991,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
restore_live_registers(sasm);
|
||||
__ leave();
|
||||
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
|
||||
assert(deopt_blob != NULL, "deoptimization blob must have been created");
|
||||
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
|
||||
|
||||
__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -35,7 +35,7 @@ int C2SafepointPollStub::max_size() const {
|
||||
}
|
||||
|
||||
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
|
||||
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
|
||||
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
|
||||
"polling page return stub not created yet");
|
||||
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
|
||||
|
||||
|
@ -287,16 +287,16 @@ void C2_MacroAssembler::string_indexof(Register str2, Register str1,
|
||||
cmp(cnt1, (u1)16); // small patterns still should be handled by simple algorithm
|
||||
br(LT, LINEAR_MEDIUM);
|
||||
mov(result, zr);
|
||||
RuntimeAddress stub = NULL;
|
||||
RuntimeAddress stub = nullptr;
|
||||
if (isL) {
|
||||
stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_ll());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_ll stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_ll stub has not been generated");
|
||||
} else if (str1_isL) {
|
||||
stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_ul());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_ul stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_ul stub has not been generated");
|
||||
} else {
|
||||
stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_uu());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_uu stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_uu stub has not been generated");
|
||||
}
|
||||
address call = trampoline_call(stub);
|
||||
if (call == nullptr) {
|
||||
@ -844,7 +844,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
}
|
||||
|
||||
bind(STUB);
|
||||
RuntimeAddress stub = NULL;
|
||||
RuntimeAddress stub = nullptr;
|
||||
switch(ae) {
|
||||
case StrIntrinsicNode::LL:
|
||||
stub = RuntimeAddress(StubRoutines::aarch64::compare_long_string_LL());
|
||||
@ -861,7 +861,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
assert(stub.target() != NULL, "compare_long_string stub has not been generated");
|
||||
assert(stub.target() != nullptr, "compare_long_string stub has not been generated");
|
||||
address call = trampoline_call(stub);
|
||||
if (call == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START));
|
||||
@ -2049,9 +2049,9 @@ void C2_MacroAssembler::vector_signum_sve(FloatRegister dst, FloatRegister src,
|
||||
}
|
||||
|
||||
bool C2_MacroAssembler::in_scratch_emit_size() {
|
||||
if (ciEnv::current()->task() != NULL) {
|
||||
if (ciEnv::current()->task() != nullptr) {
|
||||
PhaseOutput* phase_output = Compile::current()->output();
|
||||
if (phase_output != NULL && phase_output->in_scratch_emit_size()) {
|
||||
if (phase_output != nullptr && phase_output->in_scratch_emit_size()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ static bool emit_shared_trampolines(CodeBuffer* cb, CodeBuffer::SharedTrampoline
|
||||
|
||||
assert(requests->number_of_entries() >= 1, "at least one");
|
||||
const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries();
|
||||
if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == NULL) {
|
||||
if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
|
||||
// mov rmethod, 0
|
||||
// jmp -4 # to self
|
||||
|
||||
if (mark == NULL) {
|
||||
if (mark == nullptr) {
|
||||
mark = cbuf.insts_mark(); // Get mark within main instrs section.
|
||||
}
|
||||
|
||||
@ -54,8 +54,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
|
||||
|
||||
address base = __ start_a_stub(to_interp_stub_size());
|
||||
int offset = __ offset();
|
||||
if (base == NULL) {
|
||||
return NULL; // CodeBuffer::expand failed
|
||||
if (base == nullptr) {
|
||||
return nullptr; // CodeBuffer::expand failed
|
||||
}
|
||||
// static stub relocation stores the instruction address of the call
|
||||
__ relocate(static_stub_Relocation::spec(mark));
|
||||
@ -88,7 +88,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
|
||||
|
||||
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
|
||||
address stub = find_stub();
|
||||
guarantee(stub != NULL, "stub not found");
|
||||
guarantee(stub != nullptr, "stub not found");
|
||||
|
||||
if (TraceICs) {
|
||||
ResourceMark rm;
|
||||
@ -117,7 +117,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
|
||||
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
|
||||
// Reset stub.
|
||||
address stub = static_stub->addr();
|
||||
assert(stub != NULL, "stub not found");
|
||||
assert(stub != nullptr, "stub not found");
|
||||
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder
|
||||
@ -138,7 +138,7 @@ void CompiledDirectStaticCall::verify() {
|
||||
|
||||
// Verify stub.
|
||||
address stub = find_stub();
|
||||
assert(stub != NULL, "no stub found for static call");
|
||||
assert(stub != nullptr, "no stub found for static call");
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder
|
||||
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
|
||||
|
@ -149,7 +149,7 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
|
||||
// because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized
|
||||
// locals value, we don't need to change the locals value here.
|
||||
|
||||
// at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts
|
||||
// at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts
|
||||
*hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp();
|
||||
|
||||
relativize_one(vfp, hfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -39,7 +39,7 @@
|
||||
// the perfect job. In those cases, decode_instruction0 may kick in
|
||||
// and do it right.
|
||||
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
|
||||
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) {
|
||||
static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
|
||||
return here;
|
||||
}
|
||||
|
||||
|
@ -75,7 +75,7 @@ public:
|
||||
_needs_transition(needs_transition),
|
||||
_frame_complete(0),
|
||||
_frame_size_slots(0),
|
||||
_oop_maps(NULL) {
|
||||
_oop_maps(nullptr) {
|
||||
}
|
||||
|
||||
void generate();
|
||||
|
@ -96,7 +96,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
// to construct the sender and do some validation of it. This goes a long way
|
||||
// toward eliminating issues when we get in frame construction code
|
||||
|
||||
if (_cb != NULL ) {
|
||||
if (_cb != nullptr ) {
|
||||
|
||||
// First check if frame is complete and tester is reliable
|
||||
// Unfortunately we can only check frame complete for runtime stubs and nmethod
|
||||
@ -122,10 +122,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
return fp_safe;
|
||||
}
|
||||
|
||||
intptr_t* sender_sp = NULL;
|
||||
intptr_t* sender_unextended_sp = NULL;
|
||||
address sender_pc = NULL;
|
||||
intptr_t* saved_fp = NULL;
|
||||
intptr_t* sender_sp = nullptr;
|
||||
intptr_t* sender_unextended_sp = nullptr;
|
||||
address sender_pc = nullptr;
|
||||
intptr_t* saved_fp = nullptr;
|
||||
|
||||
if (is_interpreted_frame()) {
|
||||
// fp must be safe
|
||||
@ -189,7 +189,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
|
||||
// We must always be able to find a recognizable pc
|
||||
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
|
||||
if (sender_pc == NULL || sender_blob == NULL) {
|
||||
if (sender_pc == nullptr || sender_blob == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
}
|
||||
|
||||
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
|
||||
if (nm != NULL) {
|
||||
if (nm != nullptr) {
|
||||
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
|
||||
nm->method()->is_method_handle_intrinsic()) {
|
||||
return false;
|
||||
@ -264,7 +264,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
|
||||
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
|
||||
|
||||
if ( (address) this->fp()[return_addr_offset] == NULL) return false;
|
||||
if ( (address) this->fp()[return_addr_offset] == nullptr) return false;
|
||||
|
||||
|
||||
// could try and do some more potential verification of native frame if we could think of some...
|
||||
@ -298,7 +298,7 @@ void frame::patch_pc(Thread* thread, address pc) {
|
||||
*pc_addr = signed_pc;
|
||||
_pc = pc; // must be set before call to get_deopt_original_pc
|
||||
address original_pc = CompiledMethod::get_deopt_original_pc(this);
|
||||
if (original_pc != NULL) {
|
||||
if (original_pc != nullptr) {
|
||||
assert(original_pc == old_pc, "expected original PC to be stored before patching");
|
||||
_deopt_state = is_deoptimized;
|
||||
_pc = original_pc;
|
||||
@ -364,7 +364,7 @@ void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
|
||||
}
|
||||
|
||||
frame frame::sender_for_entry_frame(RegisterMap* map) const {
|
||||
assert(map != NULL, "map must be set");
|
||||
assert(map != nullptr, "map must be set");
|
||||
// Java frame called from C; skip all C frames and return top C
|
||||
// frame of that chunk as the sender
|
||||
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
|
||||
@ -392,11 +392,11 @@ bool frame::upcall_stub_frame_is_first() const {
|
||||
assert(is_upcall_stub_frame(), "must be optimzed entry frame");
|
||||
UpcallStub* blob = _cb->as_upcall_stub();
|
||||
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
|
||||
return jfa->last_Java_sp() == NULL;
|
||||
return jfa->last_Java_sp() == nullptr;
|
||||
}
|
||||
|
||||
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
|
||||
assert(map != NULL, "map must be set");
|
||||
assert(map != nullptr, "map must be set");
|
||||
UpcallStub* blob = _cb->as_upcall_stub();
|
||||
// Java frame called from C; skip all C frames and return top C
|
||||
// frame of that chunk as the sender
|
||||
@ -441,9 +441,9 @@ void frame::adjust_unextended_sp() {
|
||||
// as any other call site. Therefore, no special action is needed when we are
|
||||
// returning to any of these call sites.
|
||||
|
||||
if (_cb != NULL) {
|
||||
if (_cb != nullptr) {
|
||||
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
|
||||
if (sender_cm != NULL) {
|
||||
if (sender_cm != nullptr) {
|
||||
// If the sender PC is a deoptimization point, get the original PC.
|
||||
if (sender_cm->is_deopt_entry(_pc) ||
|
||||
sender_cm->is_deopt_mh_entry(_pc)) {
|
||||
@ -568,7 +568,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
|
||||
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
|
||||
} else {
|
||||
oop* obj_p = (oop*)tos_addr;
|
||||
obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
|
||||
obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
|
||||
}
|
||||
assert(Universe::is_in_heap_or_null(obj), "sanity check");
|
||||
*oop_result = obj;
|
||||
@ -636,7 +636,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
|
||||
intptr_t *frame::initial_deoptimization_info() {
|
||||
// Not used on aarch64, but we must return something.
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
#undef DESCRIBE_FP_OFFSET
|
||||
@ -716,7 +716,7 @@ void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
|
||||
printf("not a Method\n");
|
||||
} else {
|
||||
CodeBlob *cb = CodeCache::find_blob((address)pc);
|
||||
if (cb != NULL) {
|
||||
if (cb != nullptr) {
|
||||
if (cb->is_nmethod()) {
|
||||
ResourceMark rm;
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
@ -782,11 +782,11 @@ frame::frame(void* sp, void* fp, void* pc) {
|
||||
|
||||
void JavaFrameAnchor::make_walkable() {
|
||||
// last frame set?
|
||||
if (last_Java_sp() == NULL) return;
|
||||
if (last_Java_sp() == nullptr) return;
|
||||
// already walkable?
|
||||
if (walkable()) return;
|
||||
vmassert(last_Java_sp() != NULL, "not called from Java code?");
|
||||
vmassert(last_Java_pc() == NULL, "already walkable");
|
||||
vmassert(last_Java_sp() != nullptr, "not called from Java code?");
|
||||
vmassert(last_Java_pc() == nullptr, "already walkable");
|
||||
_last_Java_pc = (address)_last_Java_sp[-1];
|
||||
vmassert(walkable(), "something went wrong");
|
||||
}
|
||||
|
@ -39,11 +39,11 @@
// Constructors:

inline frame::frame() {
_pc = NULL;
_sp = NULL;
_unextended_sp = NULL;
_fp = NULL;
_cb = NULL;
_pc = nullptr;
_sp = nullptr;
_unextended_sp = nullptr;
_fp = nullptr;
_cb = nullptr;
_deopt_state = unknown;
_sp_is_trusted = false;
_on_heap = false;
@ -60,11 +60,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_unextended_sp = sp;
_fp = fp;
_pc = pc;
_oop_map = NULL;
_oop_map = nullptr;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)

assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc);
setup(pc);
}
@ -73,10 +73,10 @@ inline void frame::setup(address pc) {
adjust_unextended_sp();

address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -100,10 +100,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = cb;
_oop_map = NULL;
assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
_oop_map = nullptr;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)

@ -124,7 +124,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address

// In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
if (cb != NULL) {
if (cb != nullptr) {
setup(pc);
}
#ifdef ASSERT
@ -144,8 +144,8 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_fp = fp;
_pc = pc;
_cb = CodeCache::find_blob_fast(pc);
_oop_map = NULL;
assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
_oop_map = nullptr;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)

@ -171,13 +171,13 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// call a specilaized frame constructor instead of this one.
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// assert(_pc != NULL, "no pc?");
// assert(_pc != nullptr, "no pc?");

_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();

address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
@ -198,19 +198,19 @@ inline bool frame::equal(frame other) const {
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; }

inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL;
return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
@ -219,7 +219,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); retu
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
if (_cb != NULL) {
if (_cb != nullptr) {
// use the frame size if valid
int size = _cb->frame_size();
if (size > 0) {
@ -243,7 +243,7 @@ inline int frame::compiled_frame_stack_argsize() const {
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
assert(mask != NULL, "");
assert(mask != nullptr, "");
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
@ -296,7 +296,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL) {
if (last_sp == nullptr) {
return sp();
} else {
// sp() may have been extended or shrunk by an adapter. At least
@ -336,13 +336,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {

inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");

*result_adr = obj;
}
@ -356,17 +356,17 @@ inline int frame::sender_sp_ret_address_offset() {
}

inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;
if (_cb->oop_maps() != NULL) {
if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) {
if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
return NULL;
return nullptr;
}

//------------------------------------------------------------------------------
@ -395,7 +395,7 @@ inline frame frame::sender_raw(RegisterMap* map) const {
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
if (_cb != NULL) return sender_for_compiled_frame(map);
if (_cb != nullptr) return sender_for_compiled_frame(map);

// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
@ -428,13 +428,13 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != NULL) {
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);
}
} else {
assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), "");
assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
}

// Since the prolog does the save and restore of FP there is no oopmap

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -159,7 +159,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,

// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssember::call_VM_leaf_base)
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
//
// If we care generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then rfp might be pointing to
@ -210,11 +210,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ lsr(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ cbz(tmp1, done);

// crosses regions, storing NULL?
// crosses regions, storing null?

__ cbz(new_val, done);

// storing region crossing non-NULL, is card already dirty?
// storing region crossing non-null, is card already dirty?

const Register card_addr = tmp1;

@ -234,7 +234,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ ldrb(tmp2, Address(card_addr));
__ cbzw(tmp2, done);

// storing a region crossing, non-NULL oop, card is clean.
// storing a region crossing, non-null oop, card is clean.
// dirty card and log.

__ strb(zr, Address(card_addr));
@ -427,7 +427,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Label done;
Label runtime;

// At this point we know new_value is non-NULL and the new_value crosses regions.
// At this point we know new_value is non-null and the new_value crosses regions.
// Must check to see if card is already dirty

const Register thread = rthread;
@ -454,7 +454,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cbzw(rscratch1, done);

// storing region crossing non-NULL, card is clean.
// storing region crossing non-null, card is clean.
// dirty card and log.
__ strb(zr, Address(byte_map_base, card_offset));

@ -297,7 +297,7 @@ void BarrierSetAssembler::clear_patching_epoch() {
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

if (bs_nm == NULL) {
if (bs_nm == nullptr) {
return;
}

@ -305,13 +305,13 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
Label skip_barrier;
NMethodPatchingType patching_type = nmethod_patching_type();

if (slow_path == NULL) {
if (slow_path == nullptr) {
guard = &local_guard;
}

// If the slow path is out of line in a stub, we flip the condition
Assembler::Condition condition = slow_path == NULL ? Assembler::EQ : Assembler::NE;
Label& barrier_target = slow_path == NULL ? skip_barrier : *slow_path;
Assembler::Condition condition = slow_path == nullptr ? Assembler::EQ : Assembler::NE;
Label& barrier_target = slow_path == nullptr ? skip_barrier : *slow_path;

__ ldrw(rscratch1, *guard);

@ -357,7 +357,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
}
__ br(condition, barrier_target);

if (slow_path == NULL) {
if (slow_path == nullptr) {
__ movptr(rscratch1, (uintptr_t) StubRoutines::aarch64::method_entry_barrier());
__ blr(rscratch1);
__ b(skip_barrier);
@ -374,7 +374,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) {
if (bs == nullptr) {
return;
}

@ -417,5 +417,5 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register

// make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj); // get klass
__ cbz(obj, error); // if klass is NULL it is broken
__ cbz(obj, error); // if klass is null it is broken
}

@ -157,7 +157,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,

// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssember::call_VM_leaf_base)
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
//
// If we care generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then rfp might be pointing to
@ -447,7 +447,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
// b) A parallel thread may heal the contents of addr, replacing a
// from-space pointer held in addr with the to-space pointer
// representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.

@ -34,7 +34,7 @@

define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls past to check cast

define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -47,7 +47,7 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached
// because
// (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
// assert(cached_value == NULL || cached_oop->is_perm(), "must be perm oop");
// assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");

address start = __ pc();
Label l;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,7 +29,7 @@
void ICacheStubGenerator::generate_icache_flush(
ICache::flush_icache_stub_t* flush_icache_stub) {
// Give anyone who calls this a surprise
*flush_icache_stub = (ICache::flush_icache_stub_t)NULL;
*flush_icache_stub = nullptr;
}

void ICache::initialize() {}

@ -140,7 +140,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
if (JvmtiExport::can_force_early_return()) {
Label L;
ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit;
cbz(rscratch1, L); // if (thread->jvmti_thread_state() == nullptr) exit;

// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
@ -769,7 +769,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
"displached header must be first word in BasicObjectLock");

Label fail;
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

// Fast check for recursive lock.
//
@ -868,7 +868,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
cbz(header_reg, count);

// Atomic swap back the old header
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

// Call the runtime routine for slow case.
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@ -896,7 +896,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Label set_mdp;
stp(r0, r1, Address(pre(sp, -2 * wordSize)));

// Test MDO to avoid the call if it is NULL.
// Test MDO to avoid the call if it is null.
ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
cbz(r0, set_mdp);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
@ -1287,7 +1287,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
}

// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is NULL.
// observed the item[start_row] is null.

// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
@ -1303,13 +1303,13 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != NULL) {
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != NULL) {
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
@ -1583,7 +1583,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
cbz(rscratch1, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL");
" last_sp != nullptr");
bind(L);
}
#endif /* ASSERT */
@ -1611,7 +1611,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
cbz(rscratch1, L);
stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL");
" last_sp != nullptr");
bind(L);
}
#endif /* ASSERT */

@ -175,7 +175,7 @@ class InterpreterMacroAssembler: public MacroAssembler {

void empty_expression_stack() {
ldr(esp, Address(rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
// null last_sp until next java call
str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}

|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -267,7 +267,7 @@ class SlowSignatureHandler

virtual void pass_object() {
intptr_t* addr = single_slot_addr();
intptr_t value = *addr == 0 ? NULL : (intptr_t)addr;
intptr_t value = *addr == 0 ? (intptr_t)0 : (intptr_t)addr;
if (pass_gpr(value) < 0) {
pass_stack<>(value);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,10 +40,10 @@ public:

void clear(void) {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
OrderAccess::release();
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_last_Java_fp = nullptr;
_last_Java_pc = nullptr;
}

void copy(JavaFrameAnchor* src) {
@ -51,11 +51,11 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data
//
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing
//
if (_last_Java_sp != src->_last_Java_sp) {
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
OrderAccess::release();
}
_last_Java_fp = src->_last_Java_fp;
@ -64,7 +64,7 @@ public:
_last_Java_sp = src->_last_Java_sp;
}

bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; }

void make_walkable();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -108,7 +108,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
default: ShouldNotReachHere();
name = NULL; // unreachable
name = nullptr; // unreachable
}
ResourceMark rm;
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
@ -196,7 +196,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
default: ShouldNotReachHere();
slow_case_addr = NULL; // unreachable
slow_case_addr = nullptr; // unreachable
}

{

|
@ -648,7 +648,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
|
||||
Register last_java_fp,
|
||||
address last_java_pc,
|
||||
Register scratch) {
|
||||
assert(last_java_pc != NULL, "must provide a valid PC");
|
||||
assert(last_java_pc != nullptr, "must provide a valid PC");
|
||||
|
||||
adr(scratch, last_java_pc);
|
||||
str(scratch, Address(rthread,
|
||||
@ -686,7 +686,7 @@ static inline bool target_needs_far_branch(address addr) {
|
||||
|
||||
void MacroAssembler::far_call(Address entry, Register tmp) {
|
||||
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
|
||||
assert(CodeCache::find_blob(entry.target()) != NULL,
|
||||
assert(CodeCache::find_blob(entry.target()) != nullptr,
|
||||
"destination of far call not found in code cache");
|
||||
assert(entry.rspec().type() == relocInfo::external_word_type
|
||||
|| entry.rspec().type() == relocInfo::runtime_call_type
|
||||
@ -705,7 +705,7 @@ void MacroAssembler::far_call(Address entry, Register tmp) {
|
||||
|
||||
int MacroAssembler::far_jump(Address entry, Register tmp) {
|
||||
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
|
||||
assert(CodeCache::find_blob(entry.target()) != NULL,
|
||||
assert(CodeCache::find_blob(entry.target()) != nullptr,
|
||||
"destination of far call not found in code cache");
|
||||
assert(entry.rspec().type() == relocInfo::external_word_type
|
||||
|| entry.rspec().type() == relocInfo::runtime_call_type
|
||||
@ -863,7 +863,7 @@ static bool is_always_within_branch_range(Address entry) {
|
||||
// Runtime calls are calls of a non-compiled method (stubs, adapters).
|
||||
// Non-compiled methods stay forever in CodeCache.
|
||||
// We check whether the longest possible branch is within the branch range.
|
||||
assert(CodeCache::find_blob(target) != NULL &&
|
||||
assert(CodeCache::find_blob(target) != nullptr &&
|
||||
!CodeCache::find_blob(target)->is_compiled(),
|
||||
"runtime call of compiled method");
|
||||
const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
|
||||
@ -895,9 +895,9 @@ address MacroAssembler::trampoline_call(Address entry) {
|
||||
code()->share_trampoline_for(entry.target(), offset());
|
||||
} else {
|
||||
address stub = emit_trampoline_stub(offset(), target);
|
||||
if (stub == NULL) {
|
||||
if (stub == nullptr) {
|
||||
postcond(pc() == badAddress);
|
||||
return NULL; // CodeCache is full
|
||||
return nullptr; // CodeCache is full
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -927,8 +927,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
|
||||
address dest) {
|
||||
// Max stub size: alignment nop, TrampolineStub.
|
||||
address stub = start_a_stub(max_trampoline_stub_size());
|
||||
if (stub == NULL) {
|
||||
return NULL; // CodeBuffer::expand failed
|
||||
if (stub == nullptr) {
|
||||
return nullptr; // CodeBuffer::expand failed
|
||||
}
|
||||
|
||||
// Create a trampoline stub relocation which relates this trampoline stub
|
||||
@ -968,7 +968,7 @@ void MacroAssembler::emit_static_call_stub() {
|
||||
// exact layout of this stub.
|
||||
|
||||
isb();
|
||||
mov_metadata(rmethod, (Metadata*)NULL);
|
||||
mov_metadata(rmethod, nullptr);
|
||||
|
||||
// Jump to the entry point of the c2i stub.
|
||||
movptr(rscratch1, 0);
|
||||
@ -1164,7 +1164,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
|
||||
add(recv_klass, recv_klass, itentry_off);
|
||||
}
|
||||
|
||||
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
|
||||
// for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
|
||||
// if (scan->interface() == intf) {
|
||||
// result = (klass + scan->offset() + itable_index);
|
||||
// }
|
||||
@ -1223,8 +1223,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
|
||||
Register temp_reg,
|
||||
Label& L_success) {
|
||||
Label L_failure;
|
||||
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
|
||||
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
|
||||
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
|
||||
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
|
||||
bind(L_failure);
|
||||
}
|
||||
|
||||
@ -1247,10 +1247,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
|
||||
|
||||
Label L_fallthrough;
|
||||
int label_nulls = 0;
|
||||
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
|
||||
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
|
||||
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
|
||||
assert(label_nulls <= 1, "at most one NULL in the batch");
|
||||
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
|
||||
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
|
||||
if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
|
||||
assert(label_nulls <= 1, "at most one null in the batch");
|
||||
|
||||
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
|
||||
int sco_offset = in_bytes(Klass::super_check_offset_offset());
|
||||
@ -1369,9 +1369,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
|
||||
|
||||
Label L_fallthrough;
|
||||
int label_nulls = 0;
|
||||
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
|
||||
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
|
||||
assert(label_nulls <= 1, "at most one NULL in the batch");
|
||||
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
|
||||
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
|
||||
assert(label_nulls <= 1, "at most one null in the batch");
|
||||
|
||||
// a couple of useful fields in sub_klass:
|
||||
int ss_offset = in_bytes(Klass::secondary_supers_offset());
|
||||
@ -1442,13 +1442,13 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
|
||||
}
|
||||
|
||||
void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
|
||||
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
|
||||
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
|
||||
assert_different_registers(klass, rthread, scratch);
|
||||
|
||||
Label L_fallthrough, L_tmp;
|
||||
if (L_fast_path == NULL) {
|
||||
if (L_fast_path == nullptr) {
|
||||
L_fast_path = &L_fallthrough;
|
||||
} else if (L_slow_path == NULL) {
|
||||
} else if (L_slow_path == nullptr) {
|
||||
L_slow_path = &L_fallthrough;
|
||||
}
|
||||
// Fast path check: class is fully initialized
|
||||
@ -1475,7 +1475,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
|
||||
if (!VerifyOops) return;
|
||||
|
||||
// Pass register number to verify_oop_subroutine
|
||||
const char* b = NULL;
|
||||
const char* b = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -1507,7 +1507,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
|
||||
void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
|
||||
if (!VerifyOops) return;
|
||||
|
||||
const char* b = NULL;
|
||||
const char* b = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -1639,13 +1639,13 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
|
||||
|
||||
void MacroAssembler::null_check(Register reg, int offset) {
|
||||
if (needs_explicit_null_check(offset)) {
|
||||
// provoke OS NULL exception if reg = NULL by
|
||||
// provoke OS null exception if reg is null by
|
||||
// accessing M[reg] w/o changing any registers
|
||||
// NOTE: this is plenty to provoke a segv
|
||||
ldr(zr, Address(reg));
|
||||
} else {
|
||||
// nothing to do, (later) access of M[reg + offset]
|
||||
// will provoke OS NULL exception if reg = NULL
|
||||
// will provoke OS null exception if reg is null
|
||||
}
|
||||
}
|
||||
|
||||
@ -1963,7 +1963,7 @@ int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
|
||||
void MacroAssembler::membar(Membar_mask_bits order_constraint) {
|
||||
address prev = pc() - NativeMembar::instruction_size;
|
||||
address last = code()->last_insn();
|
||||
if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) {
|
||||
if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
|
||||
NativeMembar *bar = NativeMembar_at(prev);
|
||||
// We are merging two memory barrier instructions. On AArch64 we
|
||||
// can do this simply by ORing them together.
|
||||
@ -2448,8 +2448,8 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
|
||||
void MacroAssembler::verify_heapbase(const char* msg) {
|
||||
#if 0
|
||||
assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
|
||||
assert (Universe::heap() != NULL, "java heap should be initialized");
|
||||
if (!UseCompressedOops || Universe::ptr_base() == NULL) {
|
||||
assert (Universe::heap() != nullptr, "java heap should be initialized");
|
||||
if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
|
||||
// rheapbase is allocated as general register
|
||||
return;
|
||||
}
|
||||
@ -2470,7 +2470,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
|
||||
assert_different_registers(value, tmp1, tmp2);
|
||||
Label done, tagged, weak_tagged;
|
||||
|
||||
cbz(value, done); // Use NULL as-is.
|
||||
cbz(value, done); // Use null as-is.
|
||||
tst(value, JNIHandles::tag_mask); // Test for tag.
|
||||
br(Assembler::NE, tagged);
|
||||
|
||||
@ -2501,7 +2501,7 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis
|
||||
assert_different_registers(value, tmp1, tmp2);
|
||||
Label done;
|
||||
|
||||
cbz(value, done); // Use NULL as-is.
|
||||
cbz(value, done); // Use null as-is.
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
@ -2527,7 +2527,7 @@ void MacroAssembler::stop(const char* msg) {
|
||||
}
|
||||
|
||||
void MacroAssembler::unimplemented(const char* what) {
|
||||
const char* buf = NULL;
|
||||
const char* buf = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -3096,7 +3096,7 @@ bool MacroAssembler::ldst_can_merge(Register rt,
|
||||
address prev = pc() - NativeInstruction::instruction_size;
|
||||
address last = code()->last_insn();
|
||||
|
||||
if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) {
|
||||
if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -4356,7 +4356,7 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R
|
||||
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
|
||||
if (UseCompressedClassPointers) {
|
||||
ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
|
||||
if (CompressedKlassPointers::base() == NULL) {
|
||||
if (CompressedKlassPointers::base() == nullptr) {
|
||||
cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
|
||||
return;
|
||||
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
|
||||
@ -4396,7 +4396,7 @@ void MacroAssembler::encode_heap_oop(Register d, Register s) {
|
||||
verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
|
||||
#endif
|
||||
verify_oop_msg(s, "broken oop in encode_heap_oop");
|
||||
if (CompressedOops::base() == NULL) {
|
||||
if (CompressedOops::base() == nullptr) {
|
||||
if (CompressedOops::shift() != 0) {
|
||||
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
|
||||
lsr(d, s, LogMinObjAlignmentInBytes);
|
||||
@ -4429,7 +4429,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register r) {
|
||||
}
|
||||
#endif
|
||||
verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
|
||||
if (CompressedOops::base() != NULL) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
sub(r, r, rheapbase);
|
||||
}
|
||||
if (CompressedOops::shift() != 0) {
|
||||
@ -4451,7 +4451,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
|
||||
verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
|
||||
|
||||
Register data = src;
|
||||
if (CompressedOops::base() != NULL) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
sub(dst, src, rheapbase);
|
||||
data = dst;
|
||||
}
|
||||
@ -4468,7 +4468,7 @@ void MacroAssembler::decode_heap_oop(Register d, Register s) {
|
||||
#ifdef ASSERT
|
||||
verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
|
||||
#endif
|
||||
if (CompressedOops::base() == NULL) {
|
||||
if (CompressedOops::base() == nullptr) {
|
||||
if (CompressedOops::shift() != 0 || d != s) {
|
||||
lsl(d, s, CompressedOops::shift());
|
||||
}
|
||||
@ -4485,37 +4485,37 @@ void MacroAssembler::decode_heap_oop(Register d, Register s) {
|
||||
|
||||
void MacroAssembler::decode_heap_oop_not_null(Register r) {
|
||||
assert (UseCompressedOops, "should only be used for compressed headers");
|
||||
assert (Universe::heap() != NULL, "java heap should be initialized");
|
||||
assert (Universe::heap() != nullptr, "java heap should be initialized");
|
||||
// Cannot assert, unverified entry point counts instructions (see .ad file)
|
||||
// vtableStubs also counts instructions in pd_code_size_limit.
|
||||
// Also do not verify_oop as this is called by verify_oop.
|
||||
if (CompressedOops::shift() != 0) {
|
||||
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
|
||||
if (CompressedOops::base() != NULL) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
|
||||
} else {
|
||||
add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
|
||||
}
|
||||
} else {
|
||||
assert (CompressedOops::base() == NULL, "sanity");
|
||||
assert (CompressedOops::base() == nullptr, "sanity");
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
|
||||
assert (UseCompressedOops, "should only be used for compressed headers");
|
||||
assert (Universe::heap() != NULL, "java heap should be initialized");
|
||||
assert (Universe::heap() != nullptr, "java heap should be initialized");
|
||||
// Cannot assert, unverified entry point counts instructions (see .ad file)
|
||||
// vtableStubs also counts instructions in pd_code_size_limit.
|
||||
// Also do not verify_oop as this is called by verify_oop.
|
||||
if (CompressedOops::shift() != 0) {
|
||||
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
|
||||
if (CompressedOops::base() != NULL) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
|
||||
} else {
|
||||
add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
|
||||
}
|
||||
} else {
|
||||
assert (CompressedOops::base() == NULL, "sanity");
|
||||
assert (CompressedOops::base() == nullptr, "sanity");
|
||||
if (dst != src) {
|
||||
mov(dst, src);
|
||||
}
|
||||
@ -4535,7 +4535,7 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
|
||||
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
|
||||
|| 0 == CompressedKlassPointers::shift(), "decode alg wrong");
|
||||
|
||||
if (CompressedKlassPointers::base() == NULL) {
|
||||
if (CompressedKlassPointers::base() == nullptr) {
|
||||
return (_klass_decode_mode = KlassDecodeZero);
|
||||
}
|
||||
|
||||
@ -4643,8 +4643,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
|
||||
{
|
||||
ThreadInVMfromUnknown tiv;
|
||||
assert (UseCompressedOops, "should only be used for compressed oops");
|
||||
assert (Universe::heap() != NULL, "java heap should be initialized");
|
||||
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
||||
assert (Universe::heap() != nullptr, "java heap should be initialized");
|
||||
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
|
||||
assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
|
||||
}
|
||||
#endif
|
||||
@ -4658,7 +4658,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
|
||||
|
||||
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
|
||||
assert (UseCompressedClassPointers, "should only be used for compressed headers");
|
||||
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
||||
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
|
||||
int index = oop_recorder()->find_index(k);
|
||||
assert(! Universe::heap()->is_in(k), "should not be an oop");
|
||||
|
||||
@ -4711,13 +4711,13 @@ void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
|
||||
access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
|
||||
}
|
||||
|
||||
// Used for storing NULLs.
|
||||
// Used for storing nulls.
|
||||
void MacroAssembler::store_heap_oop_null(Address dst) {
|
||||
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
|
||||
}
|
||||
|
||||
Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
|
||||
assert(oop_recorder() != NULL, "this assembler needs a Recorder");
|
||||
assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
|
||||
int index = oop_recorder()->allocate_metadata_index(obj);
|
||||
RelocationHolder rspec = metadata_Relocation::spec(index);
|
||||
return Address((address)obj, rspec);
|
||||
@ -4726,7 +4726,7 @@ Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
|
||||
// Move an oop into a register.
|
||||
void MacroAssembler::movoop(Register dst, jobject obj) {
|
||||
int oop_index;
|
||||
if (obj == NULL) {
|
||||
if (obj == nullptr) {
|
||||
oop_index = oop_recorder()->allocate_oop_index(obj);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
@ -4751,7 +4751,7 @@ void MacroAssembler::movoop(Register dst, jobject obj) {
|
||||
// Move a metadata address into a register.
|
||||
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
|
||||
int oop_index;
|
||||
if (obj == NULL) {
|
||||
if (obj == nullptr) {
|
||||
oop_index = oop_recorder()->allocate_metadata_index(obj);
|
||||
} else {
|
||||
oop_index = oop_recorder()->find_index(obj);
|
||||
@ -4764,7 +4764,7 @@ Address MacroAssembler::constant_oop_address(jobject obj) {
|
||||
#ifdef ASSERT
|
||||
{
|
||||
ThreadInVMfromUnknown tiv;
|
||||
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
||||
assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
|
||||
assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
|
||||
}
|
||||
#endif
|
||||
@ -4978,23 +4978,23 @@ address MacroAssembler::count_positives(Register ary1, Register len, Register re
|
||||
|
||||
BIND(STUB);
|
||||
RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
|
||||
assert(count_pos.target() != NULL, "count_positives stub has not been generated");
|
||||
assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
|
||||
address tpc1 = trampoline_call(count_pos);
|
||||
if (tpc1 == NULL) {
|
||||
if (tpc1 == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
|
||||
postcond(pc() == badAddress);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
b(DONE);
|
||||
|
||||
BIND(STUB_LONG);
|
||||
RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
|
||||
assert(count_pos_long.target() != NULL, "count_positives_long stub has not been generated");
|
||||
assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
|
||||
address tpc2 = trampoline_call(count_pos_long);
|
||||
if (tpc2 == NULL) {
|
||||
if (tpc2 == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
|
||||
postcond(pc() == badAddress);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
b(DONE);
|
||||
|
||||
@ -5041,7 +5041,7 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
|
||||
|
||||
if (UseSimpleArrayEquals) {
|
||||
Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
|
||||
// if (a1 == null || a2 == null)
|
||||
// if (a1 == nullptr || a2 == nullptr)
|
||||
// return false;
|
||||
// a1 & a2 == 0 means (some-pointer is null) or
|
||||
// (very-rare-or-even-probably-impossible-pointer-values)
|
||||
@ -5172,12 +5172,12 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
|
||||
eor(tmp5, tmp3, tmp4);
|
||||
cbnz(tmp5, DONE);
|
||||
RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
|
||||
assert(stub.target() != NULL, "array_equals_long stub has not been generated");
|
||||
assert(stub.target() != nullptr, "array_equals_long stub has not been generated");
|
||||
address tpc = trampoline_call(stub);
|
||||
if (tpc == NULL) {
|
||||
if (tpc == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
|
||||
postcond(pc() == badAddress);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
b(DONE);
|
||||
|
||||
@ -5324,14 +5324,14 @@ address MacroAssembler::zero_words(Register ptr, Register cnt)
|
||||
BLOCK_COMMENT("zero_words {");
|
||||
assert(ptr == r10 && cnt == r11, "mismatch in register usage");
|
||||
RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
|
||||
assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
|
||||
assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
|
||||
|
||||
subs(rscratch1, cnt, zero_words_block_size);
|
||||
Label around;
|
||||
br(LO, around);
|
||||
{
|
||||
RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
|
||||
assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
|
||||
assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
|
||||
// Make sure this is a C2 compilation. C1 allocates space only for
|
||||
// trampoline stubs generated by Call LIR ops, and in any case it
|
||||
// makes sense for a C1 compilation task to proceed as quickly as
|
||||
@ -5342,9 +5342,9 @@ address MacroAssembler::zero_words(Register ptr, Register cnt)
|
||||
&& (task = ciEnv::current()->task())
|
||||
&& is_c2_compile(task->comp_level())) {
|
||||
address tpc = trampoline_call(zero_blocks);
|
||||
if (tpc == NULL) {
|
||||
if (tpc == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(around));
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
} else {
|
||||
far_call(zero_blocks);
|
||||
@ -5693,12 +5693,12 @@ address MacroAssembler::byte_array_inflate(Register src, Register dst, Register
|
||||
if (SoftwarePrefetchHintDistance >= 0) {
|
||||
bind(to_stub);
|
||||
RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
|
||||
assert(stub.target() != NULL, "large_byte_array_inflate stub has not been generated");
|
||||
assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
|
||||
address tpc = trampoline_call(stub);
|
||||
if (tpc == NULL) {
|
||||
if (tpc == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(big, done));
|
||||
postcond(pc() == badAddress);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
b(after_init);
|
||||
}
|
||||
@ -6085,7 +6085,7 @@ void MacroAssembler::object_move(
|
||||
|
||||
Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
|
||||
|
||||
// See if oop is NULL if it is we need no handle
|
||||
// See if oop is null if it is we need no handle
|
||||
|
||||
if (src.first()->is_stack()) {
|
||||
|
||||
@ -6098,13 +6098,13 @@ void MacroAssembler::object_move(
|
||||
|
||||
ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
|
||||
lea(rHandle, Address(rfp, reg2offset_in(src.first())));
|
||||
// conditionally move a NULL
|
||||
// conditionally move a null
|
||||
cmp(rscratch1, zr);
|
||||
csel(rHandle, zr, rHandle, Assembler::EQ);
|
||||
} else {
|
||||
|
||||
// Oop is in an a register we must store it to the space we reserve
|
||||
// on the stack for oop_handles and pass a handle if oop is non-NULL
|
||||
// on the stack for oop_handles and pass a handle if oop is non-null
|
||||
|
||||
const Register rOop = src.first()->as_Register();
|
||||
int oop_slot;
|
||||
@ -6131,7 +6131,7 @@ void MacroAssembler::object_move(
|
||||
int offset = oop_slot*VMRegImpl::stack_slot_size;
|
||||
|
||||
map->set_oop(VMRegImpl::stack2reg(oop_slot));
|
||||
// Store oop in handle area, may be NULL
|
||||
// Store oop in handle area, may be null
|
||||
str(rOop, Address(sp, offset));
|
||||
if (is_receiver) {
|
||||
*receiver_offset = offset;
|
||||
@ -6139,7 +6139,7 @@ void MacroAssembler::object_move(
|
||||
|
||||
cmp(rOop, zr);
|
||||
lea(rHandle, Address(sp, offset));
|
||||
// conditionally move a NULL
|
||||
// conditionally move a null
|
||||
csel(rHandle, zr, rHandle, Assembler::EQ);
|
||||
}
|
||||
|
||||
|
@ -57,7 +57,7 @@ class MacroAssembler: public Assembler {
|
||||
virtual void call_VM_leaf_base(
|
||||
address entry_point, // the entry point
|
||||
int number_of_arguments, // the number of arguments to pop after the call
|
||||
Label *retaddr = NULL
|
||||
Label *retaddr = nullptr
|
||||
);
|
||||
|
||||
virtual void call_VM_leaf_base(
|
||||
@ -602,9 +602,9 @@ public:
|
||||
int corrected_idivq(Register result, Register ra, Register rb,
|
||||
bool want_remainder, Register tmp = rscratch1);
|
||||
|
||||
// Support for NULL-checks
|
||||
// Support for null-checks
|
||||
//
|
||||
// Generates code that causes a NULL OS exception if the content of reg is NULL.
|
||||
// Generates code that causes a null OS exception if the content of reg is null.
|
||||
// If the accessed location is M[reg + offset] and the offset is known, provide the
|
||||
// offset. No explicit code generation is needed if the offset is within a certain
|
||||
// range (0 <= offset <= page_size).
|
||||
@ -627,7 +627,7 @@ public:
|
||||
// Required platform-specific helpers for Label::patch_instructions.
|
||||
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
|
||||
static int pd_patch_instruction_size(address branch, address target);
|
||||
static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
|
||||
static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
|
||||
pd_patch_instruction_size(branch, target);
|
||||
}
|
||||
static address pd_call_destination(address branch) {
|
||||
@ -872,14 +872,14 @@ public:
|
||||
Register tmp2, Register tmp3, DecoratorSet decorators = 0);
|
||||
|
||||
// currently unimplemented
|
||||
// Used for storing NULL. All other oop constants should be
|
||||
// Used for storing null. All other oop constants should be
|
||||
// stored using routines that take a jobject.
|
||||
void store_heap_oop_null(Address dst);
|
||||
|
||||
void store_klass_gap(Register dst, Register src);
|
||||
|
||||
// This dummy is to prevent a call to store_heap_oop from
|
||||
// converting a zero (like NULL) into a Register by giving
|
||||
// converting a zero (like null) into a Register by giving
|
||||
// the compiler two choices it can't resolve
|
||||
|
||||
void store_heap_oop(Address dst, void* dummy);
|
||||
@ -951,7 +951,7 @@ public:
|
||||
// Test sub_klass against super_klass, with fast and slow paths.
|
||||
|
||||
// The fast path produces a tri-state answer: yes / no / maybe-slow.
|
||||
// One of the three labels can be NULL, meaning take the fall-through.
|
||||
// One of the three labels can be null, meaning take the fall-through.
|
||||
// If super_check_offset is -1, the value is loaded up from super_klass.
|
||||
// No registers are killed, except temp_reg.
|
||||
void check_klass_subtype_fast_path(Register sub_klass,
|
||||
@ -984,8 +984,8 @@ public:
|
||||
|
||||
void clinit_barrier(Register klass,
|
||||
Register thread,
|
||||
Label* L_fast_path = NULL,
|
||||
Label* L_slow_path = NULL);
|
||||
Label* L_fast_path = nullptr,
|
||||
Label* L_slow_path = nullptr);
|
||||
|
||||
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
|
||||
|
||||
@ -1188,7 +1188,7 @@ public:
|
||||
// - relocInfo::static_call_type
|
||||
// - relocInfo::virtual_call_type
|
||||
//
|
||||
// Return: the call PC or NULL if CodeCache is full.
|
||||
// Return: the call PC or null if CodeCache is full.
|
||||
address trampoline_call(Address entry);
|
||||
|
||||
static bool far_branches() {
|
||||
|
@ -94,12 +94,12 @@
|
||||
|
||||
static bool const_oop_prefer_decode() {
|
||||
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
|
||||
return CompressedOops::base() == NULL;
|
||||
return CompressedOops::base() == nullptr;
|
||||
}
|
||||
|
||||
static bool const_klass_prefer_decode() {
|
||||
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
|
||||
return CompressedKlassPointers::base() == NULL;
|
||||
return CompressedKlassPointers::base() == nullptr;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -175,14 +175,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
|
||||
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
|
||||
// They all allow an appendix argument.
|
||||
__ hlt(0); // empty stubs make SG sick
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// No need in interpreter entry for linkToNative for now.
|
||||
// Interpreter calls compiled entry through i2c.
|
||||
if (iid == vmIntrinsics::_linkToNative) {
|
||||
__ hlt(0);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// r19_sender_sp: sender SP (must preserve; see prepare_to_jump_from_interpreted)
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -197,7 +197,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {

// Patch the constant in the call's trampoline stub.
address trampoline_stub_addr = get_trampoline();
if (trampoline_stub_addr != NULL) {
if (trampoline_stub_addr != nullptr) {
assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
}
@ -206,7 +206,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
if (reachable) {
set_destination(dest);
} else {
assert (trampoline_stub_addr != NULL, "we need a trampoline");
assert (trampoline_stub_addr != nullptr, "we need a trampoline");
set_destination(trampoline_stub_addr);
}

@ -217,7 +217,7 @@ address NativeCall::get_trampoline() {
address call_addr = addr_at(0);

CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != NULL, "Could not find the containing code blob");
assert(code != nullptr, "Could not find the containing code blob");

address bl_destination
= MacroAssembler::pd_call_destination(call_addr);
@ -229,7 +229,7 @@ address NativeCall::get_trampoline() {
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}

return NULL;
return nullptr;
}

// Inserts a native call instruction at a given pc
@ -270,7 +270,7 @@ void NativeMovConstReg::set_data(intptr_t x) {
// instruction in oops section.
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -294,7 +294,7 @@ public:
else if (is_ldr_literal_at(instruction_address()))
return(addr_at(4));
assert(false, "Unknown instruction in NativeMovConstReg");
return NULL;
return nullptr;
}

intptr_t data() const;
@ -589,7 +589,7 @@ public:
next_instruction_offset = 4 * 4
};

address destination(nmethod* nm = NULL) const;
address destination(nmethod* nm = nullptr) const;
void set_destination(address new_destination);
ptrdiff_t destination_offset() const;
};
@ -709,7 +709,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) {
return nop;
}
return NULL;
return nullptr;
}

inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
@ -731,7 +731,7 @@ class NativeDeoptInstruction: public NativeInstruction {
void verify();

static bool is_deopt_at(address instr) {
assert(instr != NULL, "");
assert(instr != nullptr, "");
uint32_t value = *(uint32_t *) instr;
return value == 0xd4ade001;
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -37,10 +37,10 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
FloatRegister::max_slots_per_register;
intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
address base_location = location(base_reg, nullptr);
if (base_location != NULL) {
if (base_location != nullptr) {
return base_location + offset_in_bytes;
} else {
return NULL;
return nullptr;
}
} else {
return location(base_reg->next(slot_idx), nullptr);
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -32,7 +32,7 @@
private:
// This is the hook for finding a register in an "well-known" location,
// such as a register block of a predetermined format.
address pd_location(VMReg reg) const { return NULL; }
address pd_location(VMReg reg) const { return nullptr; }
address pd_location(VMReg base_reg, int slot_idx) const;

// no PD state to clear or copy:
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -66,7 +66,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return nativeCallTrampolineStub_at(trampoline)->destination();
}
}
if (orig_addr != NULL) {
if (orig_addr != nullptr) {
address new_addr = MacroAssembler::pd_call_destination(orig_addr);
// If call is branch to self, don't try to relocate it, just leave it
// as branch to self. This happens during code generation if the code
@ -570,7 +570,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// caller, but with an uncorrected stack, causing delayed havoc.

if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::final_stubs_code() != NULL)) {
(Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
@ -578,18 +578,18 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL) {
if (Interpreter::code() != nullptr) {
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
}
if (StubRoutines::initial_stubs_code() != NULL) {
if (StubRoutines::initial_stubs_code() != nullptr) {
range_check(masm, rax, r11,
StubRoutines::initial_stubs_code()->code_begin(),
StubRoutines::initial_stubs_code()->code_end(),
L_ok);
}
if (StubRoutines::final_stubs_code() != NULL) {
if (StubRoutines::final_stubs_code() != nullptr) {
range_check(masm, rax, r11,
StubRoutines::final_stubs_code()->code_begin(),
StubRoutines::final_stubs_code()->code_end(),
@ -773,7 +773,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc();

// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL;
address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;

@ -803,7 +803,7 @@ static int c_calling_convention_priv(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on AArch64");
assert(regs2 == nullptr, "not needed on AArch64");

// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
@ -1421,10 +1421,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
nullptr);
}
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
assert(native_func != nullptr, "must have function");

// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
@ -1441,7 +1441,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
BasicType* in_elem_bt = nullptr;

int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
@ -1456,10 +1456,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space
// they require.
int out_arg_slots;
out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, NULL, total_c_args);
out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, nullptr, total_c_args);

if (out_arg_slots < 0) {
return NULL;
return nullptr;
}

// Compute framesize for the wrapper. We need to handlize all oops in
@ -1581,7 +1581,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sub(sp, sp, stack_size - 2*wordSize);

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);

// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
@ -1787,7 +1787,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ str(swap_reg, Address(lock_reg, mark_word_offset));

// src -> dest iff dest == r0 else r0 <- dest
__ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
__ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

// Hmm should this move to the slow path code area???

@ -2178,7 +2178,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words;
OopMap* map = NULL;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);

@ -2384,7 +2384,7 @@ void SharedRuntime::generate_deopt_blob() {
__ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
__ br(Assembler::NE, noException);
__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
// QQQ this is useless it was NULL above
// QQQ this is useless it was null above
__ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
@ -2764,7 +2764,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
MacroAssembler* masm = new MacroAssembler(&buffer);

address start = __ pc();
address call_pc = NULL;
address call_pc = nullptr;
int frame_size_in_words;
bool cause_return = (poll_type == POLL_AT_RETURN);
RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
@ -2880,7 +2880,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");

// allocate space for the code
ResourceMark rm;
@ -2892,7 +2892,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
RegisterSaver reg_save(false /* save_vectors */);

OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL;
OopMap* map = nullptr;

int start = __ offset();
@ -438,7 +438,7 @@ class StubGenerator: public StubCodeGenerator {
__ strw(rscratch1, Address(rthread, Thread::exception_line_offset()));

// complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL,
assert(StubRoutines::_call_stub_return_address != nullptr,
"_call_stub_return_address must have been generated before");
__ b(StubRoutines::_call_stub_return_address);

@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {

// object is in r0
// make sure object is 'reasonable'
__ cbz(r0, exit); // if obj is NULL it is OK
__ cbz(r0, exit); // if obj is null it is OK

BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, r0, c_rarg2, c_rarg3, error);
@ -1480,7 +1480,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter();

if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1546,7 +1546,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter();

if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1824,9 +1824,9 @@ class StubGenerator: public StubCodeGenerator {

Label L_miss;

__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr,
super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr);

// Fall through on failure!
__ BIND(L_miss);
@ -1897,7 +1897,7 @@ class StubGenerator: public StubCodeGenerator {
#endif //ASSERT

// Caller of this entry point must set up the argument registers.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
BLOCK_COMMENT("Entry:");
}
@ -2134,19 +2134,19 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
//

// if (src == NULL) return -1;
// if (src == nullptr) return -1;
__ cbz(src, L_failed);

// if (src_pos < 0) return -1;
__ tbnz(src_pos, 31, L_failed); // i.e. sign bit set

// if (dst == NULL) return -1;
// if (dst == nullptr) return -1;
__ cbz(dst, L_failed);

// if (dst_pos < 0) return -1;
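The cbz/tbnz guards above can be summarized in plain C++ as below (a standalone sketch covering the null checks and conditions (2)-(4), (7) and (8); the klass and array-type checks (5) and (6) are omitted). A failed check returns -1 so the caller falls back to the generic copy path.

    #include <cstdint>

    // Standalone restatement of the generic-arraycopy argument guards.
    static int arraycopy_arg_check(const void* src, int src_pos,
                                   const void* dst, int dst_pos,
                                   int length, int src_len, int dst_len) {
      if (src == nullptr || dst == nullptr) return -1;           // null src or dst
      if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;   // (2)(3)(4)
      if ((int64_t)src_pos + length > src_len) return -1;        // (7)
      if ((int64_t)dst_pos + length > dst_len) return -1;        // (8)
      return 0;                                                  // checks passed
    }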
@ -2163,11 +2163,11 @@ class StubGenerator: public StubCodeGenerator {

__ load_klass(scratch_src_klass, src);
#ifdef ASSERT
// assert(src->klass() != NULL);
// assert(src->klass() != nullptr);
{
BLOCK_COMMENT("assert klasses not null {");
Label L1, L2;
__ cbnz(scratch_src_klass, L2); // it is broken if klass is NULL
__ cbnz(scratch_src_klass, L2); // it is broken if klass is null
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
@ -2575,7 +2575,7 @@ class StubGenerator: public StubCodeGenerator {
"jbyte_arraycopy");
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
"arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL,
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr,
"arrayof_jbyte_arraycopy");

//*** jshort
@ -2587,7 +2587,7 @@ class StubGenerator: public StubCodeGenerator {
"jshort_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
"arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL,
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr,
"arrayof_jshort_arraycopy");

//*** jint
@ -2630,7 +2630,7 @@ class StubGenerator: public StubCodeGenerator {
= generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_arrayof_oop_arraycopy_uninit
= generate_conjoint_oop_copy(aligned, entry, NULL, "arrayof_oop_arraycopy_uninit",
= generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true);
}

@ -2640,7 +2640,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit;

StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true);

StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
@ -8090,7 +8090,7 @@ class StubGenerator: public StubCodeGenerator {
generate_arraycopy_stubs();

BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
StubRoutines::aarch64::_method_entry_barrier = generate_method_entry_barrier();
}
@ -33,30 +33,30 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.

address StubRoutines::aarch64::_get_previous_sp_entry = NULL;
address StubRoutines::aarch64::_get_previous_sp_entry = nullptr;

address StubRoutines::aarch64::_f2i_fixup = NULL;
address StubRoutines::aarch64::_f2l_fixup = NULL;
address StubRoutines::aarch64::_d2i_fixup = NULL;
address StubRoutines::aarch64::_d2l_fixup = NULL;
address StubRoutines::aarch64::_vector_iota_indices = NULL;
address StubRoutines::aarch64::_float_sign_mask = NULL;
address StubRoutines::aarch64::_float_sign_flip = NULL;
address StubRoutines::aarch64::_double_sign_mask = NULL;
address StubRoutines::aarch64::_double_sign_flip = NULL;
address StubRoutines::aarch64::_zero_blocks = NULL;
address StubRoutines::aarch64::_count_positives = NULL;
address StubRoutines::aarch64::_count_positives_long = NULL;
address StubRoutines::aarch64::_large_array_equals = NULL;
address StubRoutines::aarch64::_compare_long_string_LL = NULL;
address StubRoutines::aarch64::_compare_long_string_UU = NULL;
address StubRoutines::aarch64::_compare_long_string_LU = NULL;
address StubRoutines::aarch64::_compare_long_string_UL = NULL;
address StubRoutines::aarch64::_string_indexof_linear_ll = NULL;
address StubRoutines::aarch64::_string_indexof_linear_uu = NULL;
address StubRoutines::aarch64::_string_indexof_linear_ul = NULL;
address StubRoutines::aarch64::_large_byte_array_inflate = NULL;
address StubRoutines::aarch64::_method_entry_barrier = NULL;
address StubRoutines::aarch64::_f2i_fixup = nullptr;
address StubRoutines::aarch64::_f2l_fixup = nullptr;
address StubRoutines::aarch64::_d2i_fixup = nullptr;
address StubRoutines::aarch64::_d2l_fixup = nullptr;
address StubRoutines::aarch64::_vector_iota_indices = nullptr;
address StubRoutines::aarch64::_float_sign_mask = nullptr;
address StubRoutines::aarch64::_float_sign_flip = nullptr;
address StubRoutines::aarch64::_double_sign_mask = nullptr;
address StubRoutines::aarch64::_double_sign_flip = nullptr;
address StubRoutines::aarch64::_zero_blocks = nullptr;
address StubRoutines::aarch64::_count_positives = nullptr;
address StubRoutines::aarch64::_count_positives_long = nullptr;
address StubRoutines::aarch64::_large_array_equals = nullptr;
address StubRoutines::aarch64::_compare_long_string_LL = nullptr;
address StubRoutines::aarch64::_compare_long_string_UU = nullptr;
address StubRoutines::aarch64::_compare_long_string_LU = nullptr;
address StubRoutines::aarch64::_compare_long_string_UL = nullptr;
address StubRoutines::aarch64::_string_indexof_linear_ll = nullptr;
address StubRoutines::aarch64::_string_indexof_linear_uu = nullptr;
address StubRoutines::aarch64::_string_indexof_linear_ul = nullptr;
address StubRoutines::aarch64::_large_byte_array_inflate = nullptr;
address StubRoutines::aarch64::_method_entry_barrier = nullptr;

static void empty_spin_wait() { }
address StubRoutines::aarch64::_spin_wait = CAST_FROM_FN_PTR(address, empty_spin_wait);
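A short note on these initializers (my reading, consistent with the guards elsewhere in this change): each slot starts as nullptr and is filled in when the corresponding stub is generated during startup, so a still-nullptr slot means "no stub available" and use sites either assert or fall back. A hypothetical selection helper in that style:

    // Hypothetical helper: prefer the generated stub, else the shared C++ code.
    static address dsin_entry() {
      return StubRoutines::dsin() != nullptr
          ? CAST_FROM_FN_PTR(address, StubRoutines::dsin())
          : CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    }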
@ -99,7 +99,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args <- esp
// garbage
// expression stack bottom
// bcp (NULL)
// bcp (null)
// ...

// Restore LR
@ -162,7 +162,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// [ arg ]
// retaddr in lr

address entry_point = NULL;
address entry_point = nullptr;
Register continuation = lr;
switch (kind) {
case Interpreter::java_lang_math_abs:
@ -242,49 +242,49 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr
address fn;
switch (kind) {
case Interpreter::java_lang_math_sin :
if (StubRoutines::dsin() == NULL) {
if (StubRoutines::dsin() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
}
break;
case Interpreter::java_lang_math_cos :
if (StubRoutines::dcos() == NULL) {
if (StubRoutines::dcos() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
}
break;
case Interpreter::java_lang_math_tan :
if (StubRoutines::dtan() == NULL) {
if (StubRoutines::dtan() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
}
break;
case Interpreter::java_lang_math_log :
if (StubRoutines::dlog() == NULL) {
if (StubRoutines::dlog() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
}
break;
case Interpreter::java_lang_math_log10 :
if (StubRoutines::dlog10() == NULL) {
if (StubRoutines::dlog10() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
}
break;
case Interpreter::java_lang_math_exp :
if (StubRoutines::dexp() == NULL) {
if (StubRoutines::dexp() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
}
break;
case Interpreter::java_lang_math_pow :
if (StubRoutines::dpow() == NULL) {
if (StubRoutines::dpow() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
@ -292,7 +292,7 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr
break;
default:
ShouldNotReachHere();
fn = NULL; // unreachable
fn = nullptr; // unreachable
}
__ mov(rscratch1, fn);
__ blr(rscratch1);
@ -342,7 +342,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {

// abstract method entry

// pop return address, reset last_sp to NULL
// pop return address, reset last_sp to null
__ empty_expression_stack();
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
@ -427,7 +427,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {

address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();
if (pass_oop) {
// object is at TOS
@ -444,9 +444,9 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
create_klass_exception),
c_rarg1, c_rarg2);
} else {
// kind of lame ExternalAddress can't take NULL because
// kind of lame ExternalAddress can't take null because
// external_word_Relocation will assert.
if (message != NULL) {
if (message != nullptr) {
__ lea(c_rarg2, Address((address)message));
} else {
__ mov(c_rarg2, NULL_WORD);
@ -465,7 +465,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,

// Restore stack bottom in case i2c adjusted stack
__ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call
// and null it as marker that esp is now tos until next java call
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
__ restore_bcp();
__ restore_locals();
@ -521,7 +521,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,

// Restore expression stack pointer
__ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
// NULL last_sp until next java call
// null last_sp until next java call
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

#if INCLUDE_JVMCI
@ -560,7 +560,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ bind(L);
}

if (continuation == NULL) {
if (continuation == nullptr) {
__ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
@ -653,8 +653,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch
// (NULL bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or NULL if the
// (null bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or null if the
// compilation did not complete (either went background or bailed
// out).
__ mov(c_rarg1, 0);
@ -746,7 +746,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

// all done with frame size check
@ -796,7 +796,7 @@ void TemplateInterpreterGenerator::lock_method() {
{
Label L;
__ cbnz(r0, L);
__ stop("synchronization object is NULL");
__ stop("synchronization object is null");
__ bind(L);
}
#endif // ASSERT
@ -934,7 +934,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {

Label slow_path;
const Register local_0 = c_rarg0;
// Check if local 0 != NULL
// Check if local 0 != null
// If the receiver is null then it is OK to jump to the slow path.
__ ldr(local_0, Address(esp, 0));
__ cbz(local_0, slow_path);
@ -1894,7 +1894,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ br(Assembler::NE, L_done);

// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or null.

__ ldr(c_rarg0, Address(rlocals, 0));
__ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
@ -2061,7 +2061,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The run-time runtime saves the right registers, depending on
// the tosca in-state for the given template.

assert(Interpreter::trace_code(t->tos_in()) != NULL,
assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated");
__ bl(Interpreter::trace_code(t->tos_in()));
__ reinit_heapbase();
@ -139,8 +139,8 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) {


// Miscellaneous helper routines
// Store an oop (or NULL) at the Address described by obj.
// If val == noreg this means store a NULL
// Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
Address dst,
Register val,
@ -414,7 +414,7 @@ void TemplateTable::fast_aldc(LdcType type)
__ resolve_oop_handle(tmp, r5, rscratch2);
__ cmpoop(result, tmp);
__ br(Assembler::NE, notNull);
__ mov(result, 0); // NULL object reference
__ mov(result, 0); // null object reference
__ bind(notNull);
}

@ -1109,7 +1109,7 @@ void TemplateTable::aastore() {
index_check(r3, r2); // kills r1
__ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

// do array store check - check for NULL value first
// do array store check - check for null value first
__ cbz(r0, is_null);

// Move subklass into r1
@ -1137,11 +1137,11 @@ void TemplateTable::aastore() {
do_oop_store(_masm, element_address, r0, IS_ARRAY);
__ b(done);

// Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx]
// Have a null in r0, r3=array, r2=index. Store null at ary[idx]
__ bind(is_null);
__ profile_null_seen(r2);

// Store a NULL
// Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY);

// Pop stack arguments
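The aastore flow above can be read as the following standalone model (illustrative types and names, not HotSpot code): a null value is stored without a subtype check, which is why the template branches to is_null before performing the check.

    #include <stdexcept>

    struct Obj;   // stand-in for a Java object

    // Model of aastore: null stores skip the element-type check.
    static void aastore_model(Obj** array, int index, Obj* value,
                              bool value_is_subtype_of_element_klass) {
      if (value == nullptr) {
        array[index] = nullptr;                       // "Store a null"
        return;
      }
      if (!value_is_subtype_of_element_klass) {
        throw std::runtime_error("ArrayStoreException");
      }
      array[index] = value;                           // checked store
    }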
@ -1864,7 +1864,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
r2);
__ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode

// r0: osr nmethod (osr ok) or NULL (osr not possible)
// r0: osr nmethod (osr ok) or null (osr not possible)
// w1: target bytecode
// r2: scratch
__ cbz(r0, dispatch); // test result -- no osr if null
@ -2274,7 +2274,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache);
__ load_method_holder(temp, temp);
__ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
__ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
}
}

@ -2433,12 +2433,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
__ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));

if (is_static) {
__ mov(c_rarg1, zr); // NULL object reference
__ mov(c_rarg1, zr); // null object reference
} else {
__ ldr(c_rarg1, at_tos()); // get object pointer without popping it
__ verify_oop(c_rarg1);
}
// c_rarg1: object pointer or NULL
// c_rarg1: object pointer or null
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -2686,7 +2686,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
// object (tos)
__ mov(c_rarg3, esp);
// c_rarg1: object pointer set up above (NULL if static)
// c_rarg1: object pointer set up above (null if static)
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
__ call_VM(noreg,
@ -3683,7 +3683,7 @@ void TemplateTable::checkcast()
__ bind(ok_is_subtype);
__ mov(r0, r3); // Restore object in r3

// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ b(done);
__ bind(is_null);
@ -3736,7 +3736,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype);
__ mov(r0, 1);

// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ b(done);
__ bind(is_null);
@ -3745,8 +3745,8 @@ void TemplateTable::instanceof() {
__ bind(is_null); // same as 'done'
}
__ bind(done);
// r0 = 0: obj == NULL or obj is not an instanceof the specified klass
// r0 = 1: obj != NULL and obj is an instanceof the specified klass
// r0 = 0: obj == nullptr or obj is not an instanceof the specified klass
// r0 = 1: obj != nullptr and obj is an instanceof the specified klass
}

//-----------------------------------------------------------------------------
@ -3806,7 +3806,7 @@ void TemplateTable::monitorenter()
{
transition(atos, vtos);

// check for NULL object
// check for null object
__ null_check(r0);

const Address monitor_block_top(
@ -3818,7 +3818,7 @@ void TemplateTable::monitorenter()
Label allocated;

// initialize entry pointer
__ mov(c_rarg1, zr); // points to free slot or NULL
__ mov(c_rarg1, zr); // points to free slot or null

// find a free slot in the monitor block (result in c_rarg1)
{
@ -3909,7 +3909,7 @@ void TemplateTable::monitorexit()
{
transition(atos, vtos);

// check for NULL object
// check for null object
__ null_check(r0);

const Address monitor_block_top(
@ -591,7 +591,7 @@ static bool check_info_file(const char* fpath,
fclose(fp);
return true;
}
if (virt2 != NULL && strcasestr(line, virt2) != 0) {
if (virt2 != nullptr && strcasestr(line, virt2) != 0) {
Abstract_VM_Version::_detected_virtualization = vt2;
fclose(fp);
return true;
@ -609,7 +609,7 @@ void VM_Version::check_virtualizations() {
if (check_info_file(pname_file, "KVM", KVM, "VMWare", VMWare)) {
return;
}
check_info_file(tname_file, "Xen", XenPVHVM, NULL, NoDetectedVirtualization);
check_info_file(tname_file, "Xen", XenPVHVM, nullptr, NoDetectedVirtualization);
#endif
}
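For context (a simplified, standalone sketch of the probe above; the file handling and buffer size are assumptions): check_info_file scans a text file line by line for up to two case-insensitive markers, and passing nullptr as the second marker, as the Xen probe does, simply disables the second comparison.

    #define _GNU_SOURCE 1   // strcasestr is a GNU extension, as used above
    #include <cstdio>
    #include <cstring>

    // Returns true if either marker occurs in the file (virt2 may be nullptr).
    static bool scan_for_hypervisor(const char* path, const char* virt1, const char* virt2) {
      FILE* fp = fopen(path, "r");
      if (fp == nullptr) return false;
      char line[500];
      bool found = false;
      while (fgets(line, sizeof(line), fp) != nullptr) {
        if (strcasestr(line, virt1) != nullptr ||
            (virt2 != nullptr && strcasestr(line, virt2) != nullptr)) {
          found = true;
          break;
        }
      }
      fclose(fp);
      return found;
    }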
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -51,9 +51,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}

// Count unused bytes in instruction sequences of variable size.
@ -118,7 +118,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ cbz(rmethod, L);
__ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
__ cbnz(rscratch1, L);
__ stop("Vtable entry is NULL");
__ stop("Vtable entry is null");
__ bind(L);
}
#endif // PRODUCT
@ -141,9 +141,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}

// Count unused bytes in instruction sequences of variable size.
@ -241,7 +241,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// We force resolving of the call site by jumping to the "handle
// wrong method" stub, and so let the interpreter runtime do all the
// dirty work.
assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

masm->flush();