8301496: Replace NULL with nullptr in cpu/riscv

Reviewed-by: dholmes, fyang
Johan Sjölen 2023-04-14 09:53:46 +00:00
parent 54bf370079
commit d2ce04bb10
45 changed files with 573 additions and 573 deletions
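
Background for the change: NULL is an integer constant (typically 0 or 0L), so it participates in integer conversions and can steer overload resolution or template deduction the wrong way; nullptr is a dedicated pointer literal of type std::nullptr_t that converts only to pointer types. A small illustrative program, not part of the commit:

#include <cstddef>

// Illustrative only. f(NULL) would be ambiguous or resolve to f(int),
// depending on how the library defines NULL; f(nullptr) always calls
// the pointer overload.
static int f(int)   { return 1; }
static int f(char*) { return 2; }

int main() {
  return f(nullptr) == 2 ? 0 : 1;
}

Every hunk below is this same mechanical substitution, plus matching wording updates in comments and assertion messages ("NULL" becoming "null").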

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -66,7 +66,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
     -(frame::interpreter_frame_initial_sp_offset) + entry_size;

   const int stub_code = frame::entry_frame_after_call_words;
-  assert_cond(method != NULL);
+  assert_cond(method != nullptr);
   const int method_stack = (method->max_locals() + method->max_stack()) *
                            Interpreter::stackElementWords;
   return (overhead_size + method_stack + stub_code);
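
A note on assert_cond, which recurs throughout these hunks: in the RISC-V port it reads as a thin debug-only wrapper that stringizes its condition, roughly #define assert_cond(ARG1) assert(ARG1, #ARG1). A stand-alone sketch under that assumption, with my_assert as a hypothetical stand-in for HotSpot's two-argument assert:

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for HotSpot's assert(condition, message).
#define my_assert(cond, msg)                              \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "assert failed: %s\n", msg);   \
      std::abort();                                       \
    }                                                     \
  } while (0)
#define assert_cond(ARG1) my_assert(ARG1, #ARG1)

int main() {
  int* p = nullptr;
  assert_cond(p == nullptr);  // holds, so execution continues
  return 0;
}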

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -381,7 +381,7 @@ public:
   }

   static void patch(address a, unsigned msb, unsigned lsb, unsigned val) {
-    assert_cond(a != NULL);
+    assert_cond(a != nullptr);
     assert_cond(msb >= lsb && msb <= 31);
     unsigned nbits = msb - lsb + 1;
     guarantee(val < (1U << nbits), "Field too big for insn");
@@ -1926,7 +1926,7 @@ public:

   // patch a 16-bit instruction.
   static void c_patch(address a, unsigned msb, unsigned lsb, uint16_t val) {
-    assert_cond(a != NULL);
+    assert_cond(a != nullptr);
     assert_cond(msb >= lsb && msb <= 15);
     unsigned nbits = msb - lsb + 1;
     guarantee(val < (1U << nbits), "Field too big for insn");
@@ -2171,7 +2171,7 @@ public:
     emit_int16(insn);                                                          \
   }                                                                            \
   void NAME(address dest) {                                                    \
-    assert_cond(dest != NULL);                                                 \
+    assert_cond(dest != nullptr);                                              \
     int64_t distance = dest - pc();                                            \
     assert(is_simm12(distance) && ((distance % 2) == 0), "invalid encoding");  \
     c_j(distance);                                                             \
@@ -2199,7 +2199,7 @@ public:
     emit_int16(insn);                                                          \
   }                                                                            \
   void NAME(Register Rs1, address dest) {                                      \
-    assert_cond(dest != NULL);                                                 \
+    assert_cond(dest != nullptr);                                              \
     int64_t distance = dest - pc();                                            \
     assert(is_simm9(distance) && ((distance % 2) == 0), "invalid encoding");   \
     NAME(Rs1, distance);                                                       \
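
For context, the patch and c_patch helpers guarded by these checks overwrite the bit range [lsb, msb] of an already-emitted instruction word. A simplified stand-alone sketch of that masked read-modify-write; patch_field is a hypothetical name, not the HotSpot signature:

#include <cassert>
#include <cstdint>

// Replace bits [lsb, msb] of a 32-bit instruction word with val.
// The real helper additionally validates its inputs with
// assert_cond/guarantee, as shown in the hunks above.
static void patch_field(uint32_t* insn, unsigned msb, unsigned lsb, uint32_t val) {
  assert(msb >= lsb && msb <= 31);
  unsigned nbits = msb - lsb + 1;
  assert(nbits == 32 || val < (1u << nbits));  // field must fit
  uint32_t mask = (nbits == 32) ? ~0u : (((1u << nbits) - 1) << lsb);
  *insn = (*insn & ~mask) | ((val << lsb) & mask);
}

int main() {
  uint32_t insn = 0;
  patch_field(&insn, 11, 7, 0x1f);  // set a 5-bit rd-style field to 31
  assert(insn == (0x1fu << 7));
  return 0;
}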

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -49,7 +49,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
   });
   __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

-  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
          "polling page return stub not created yet");
   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

@@ -253,7 +253,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
 }

 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
-  address a = NULL;
+  address a = nullptr;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
     a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
@@ -322,7 +322,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                   relocInfo::static_call_type);
   address call = __ trampoline_call(resolve);
-  if (call == NULL) {
+  if (call == nullptr) {
     ce->bailout("trampoline stub overflow");
     return;
   }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -263,7 +263,7 @@ void LIR_Assembler::arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr rig

 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                              CodeEmitInfo* info, bool pop_fpu_stack) {
-  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
+  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

   if (left->is_single_cpu()) {
     arith_op_single_cpu(code, left, right, dest);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -42,7 +42,7 @@ void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register l
   arraycopy_store_args(src, src_pos, length, dst, dst_pos);

   address copyfunc_addr = StubRoutines::generic_arraycopy();
-  assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+  assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

   // The arguments are in java calling convention so we shift them
   // to C convention
@@ -80,7 +80,7 @@ void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register l
 void LIR_Assembler::arraycopy_simple_check(Register src, Register src_pos, Register length,
                                            Register dst, Register dst_pos, Register tmp,
                                            CodeStub *stub, int flags) {
-  // test for NULL
+  // test for null
   if (flags & LIR_OpArrayCopy::src_null_check) {
     __ beqz(src, *stub->entry(), /* is_far */ true);
   }
@@ -220,7 +220,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
   PUSH(src, dst);
   __ load_klass(src, src);
   __ load_klass(dst, dst);
-  __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
+  __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

   PUSH(src, dst);
   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
@@ -231,7 +231,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
   POP(src, dst);

   address copyfunc_addr = StubRoutines::checkcast_arraycopy();
-  if (copyfunc_addr != NULL) { // use stub if available
+  if (copyfunc_addr != nullptr) { // use stub if available
     arraycopy_checkcast(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, copyfunc_addr, flags);
   }

@@ -242,7 +242,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
 }

 void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) {
-  assert(default_type != NULL, "NULL default_type!");
+  assert(default_type != nullptr, "null default_type!");
   BasicType basic_type = default_type->element_type()->basic_type();
   if (basic_type == T_ARRAY) { basic_type = T_OBJECT; }

@@ -299,16 +299,16 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

   CodeStub* stub = op->stub();
   int flags = op->flags();
-  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
+  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
   if (is_reference_type(basic_type)) { basic_type = T_OBJECT; }

   // if we don't know anything, just go through the generic arraycopy
-  if (default_type == NULL) {
+  if (default_type == nullptr) {
     generic_arraycopy(src, src_pos, length, dst, dst_pos, stub);
     return;
   }

-  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
+  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(),
          "must be true at this point");

   arraycopy_simple_check(src, src_pos, length, dst, dst_pos, tmp, stub, flags);
@@ -330,11 +330,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
     bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;

-    const char *name = NULL;
+    const char *name = nullptr;
     address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

     CodeBlob *cb = CodeCache::find_blob(entry);
-    if (cb != NULL) {
+    if (cb != nullptr) {
       __ far_call(RuntimeAddress(entry));
     } else {
       const int args_num = 3;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -201,7 +201,7 @@ Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
 void LIR_Assembler::osr_entry() {
   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
-  guarantee(osr_entry != NULL, "NULL osr_entry!");
+  guarantee(osr_entry != nullptr, "null osr_entry!");
   ValueStack* entry_state = osr_entry->state();
   int number_of_locks = entry_state->locks_size();
@@ -251,7 +251,7 @@ void LIR_Assembler::osr_entry() {
         Label L;
         __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
         __ bnez(t0, L);
-        __ stop("locked object is NULL");
+        __ stop("locked object is null");
         __ bind(L);
       }
 #endif // ASSERT
@@ -288,7 +288,7 @@ int LIR_Assembler::check_icache() {
 }

 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
-  if (o == NULL) {
+  if (o == nullptr) {
     __ mv(reg, zr);
   } else {
     __ movoop(reg, o);
@@ -309,7 +309,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
 int LIR_Assembler::emit_exception_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("exception handler overflow");
     return -1;
@@ -356,7 +356,7 @@ int LIR_Assembler::emit_unwind_handler() {
   }

   // Perform needed unlocking
-  MonitorExitStub* stub = NULL;
+  MonitorExitStub* stub = nullptr;
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::r10_opr);
     stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
@@ -384,7 +384,7 @@ int LIR_Assembler::emit_unwind_handler() {
   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

   // Emit the slow path assembly
-  if (stub != NULL) {
+  if (stub != nullptr) {
     stub->emit_code(this);
   }
@@ -394,7 +394,7 @@ int LIR_Assembler::emit_unwind_handler() {
 int LIR_Assembler::emit_deopt_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(deopt_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
     return -1;
@@ -427,7 +427,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 }

 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  guarantee(info != NULL, "Shouldn't be NULL");
+  guarantee(info != nullptr, "Shouldn't be null");
   __ get_polling_page(t0, relocInfo::poll_type);
   add_debug_info_for_branch(info); // This isn't just debug info:
                                    // it's the oop map
@@ -445,7 +445,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
   assert(src->is_constant(), "should not call otherwise");
   assert(dest->is_register(), "should not call otherwise");
   LIR_Const* c = src->as_constant_ptr();
-  address const_addr = NULL;
+  address const_addr = nullptr;

   switch (c->type()) {
     case T_INT:
@@ -482,13 +482,13 @@

     case T_FLOAT:
       const_addr = float_constant(c->as_jfloat());
-      assert(const_addr != NULL, "must create float constant in the constant table");
+      assert(const_addr != nullptr, "must create float constant in the constant table");
       __ flw(dest->as_float_reg(), InternalAddress(const_addr));
       break;

     case T_DOUBLE:
       const_addr = double_constant(c->as_jdouble());
-      assert(const_addr != NULL, "must create double constant in the constant table");
+      assert(const_addr != nullptr, "must create double constant in the constant table");
       __ fld(dest->as_double_reg(), InternalAddress(const_addr));
       break;

@@ -503,15 +503,15 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
   LIR_Const* c = src->as_constant_ptr();
   switch (c->type()) {
     case T_OBJECT:
-      if (c->as_jobject() == NULL) {
+      if (c->as_jobject() == nullptr) {
        __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
       } else {
-        const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+        const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
         reg2stack(FrameMap::t1_opr, dest, c->type(), false);
       }
       break;
     case T_ADDRESS: // fall through
-      const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+      const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
       reg2stack(FrameMap::t1_opr, dest, c->type(), false);
     case T_INT: // fall through
     case T_FLOAT:
@@ -582,7 +582,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       ShouldNotReachHere();
       insn = &MacroAssembler::sd; // unreachable
   }
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }
   (_masm->*insn)(zr, as_Address(to_addr), t0);
@@ -730,7 +730,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       ShouldNotReachHere();
   }

-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check(null_check_here, info);
   }
 }
@@ -800,7 +800,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     return;
   }

-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }

@@ -901,12 +901,12 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
                 /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? false : true);

   Label done;
-  move_op(opr2, result, type, lir_patch_none, NULL,
+  move_op(opr2, result, type, lir_patch_none, nullptr,
           false,   // pop_fpu_stack
           false);  // wide
   __ j(done);
   __ bind(label);
-  move_op(opr1, result, type, lir_patch_none, NULL,
+  move_op(opr1, result, type, lir_patch_none, nullptr,
           false,   // pop_fpu_stack
           false);  // wide
   __ bind(done);
@@ -915,7 +915,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
   LIR_Condition condition = op->cond();
   if (condition == lir_cond_always) {
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
       add_debug_info_for_branch(op->info());
     }
   } else {
@@ -1078,12 +1078,12 @@ void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfil

 void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
   ciMethod* method = op->profiled_method();
-  assert(method != NULL, "Should have method");
+  assert(method != nullptr, "Should have method");
   int bci = op->profiled_bci();
   *md = method->method_data_or_null();
-  guarantee(*md != NULL, "Sanity");
+  guarantee(*md != nullptr, "Sanity");
   *data = ((*md)->bci_to_data(bci));
-  assert(*data != NULL, "need data for type check");
+  assert(*data != nullptr, "need data for type check");
   assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
 }
@@ -1118,7 +1118,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe
     }
   } else {
     // perform the fast part of the checking logic
-    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // call out-of-line instance of __ check_klass_subtytpe_slow_path(...)
     __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
     __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
@@ -1165,8 +1165,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   Register Rtmp1 = noreg;

   // check if it needs to be profiled
-  ciMethodData* md = NULL;
-  ciProfileData* data = NULL;
+  ciMethodData* md = nullptr;
+  ciProfileData* data = nullptr;

   const bool should_profile = op->should_profile();
   if (should_profile) {
@@ -1354,7 +1354,7 @@ void LIR_Assembler::align_call(LIR_Code code) {

 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   address call = __ trampoline_call(Address(op->addr(), rtype));
-  if (call == NULL) {
+  if (call == nullptr) {
     bailout("trampoline stub overflow");
     return;
   }
@@ -1364,7 +1364,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {

 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   address call = __ ic_call(op->addr());
-  if (call == NULL) {
+  if (call == nullptr) {
     bailout("trampoline stub overflow");
     return;
   }
@@ -1376,7 +1376,7 @@ void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   MacroAssembler::assert_alignment(call_pc);
   address stub = __ start_a_stub(call_stub_size());
-  if (stub == NULL) {
+  if (stub == nullptr) {
     bailout("static call stub overflow");
     return;
   }
@@ -1500,7 +1500,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
   if (UseHeavyMonitors) {
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
       add_debug_info_for_null_check_here(op->info());
       __ null_check(obj);
     }
@@ -1509,7 +1509,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
       add_debug_info_for_null_check(null_check_offset, op->info());
     }
   } else if (op->code() == lir_unlock) {
@@ -1526,7 +1526,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
   Register result = op->result_opr()->as_pointer_register();
   CodeEmitInfo* info = op->info();

-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }

@@ -1544,9 +1544,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

   // Update counter for all call types
   ciMethodData* md = method->method_data_or_null();
-  guarantee(md != NULL, "Sanity");
+  guarantee(md != nullptr, "Sanity");
   ciProfileData* data = md->bci_to_data(bci);
-  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
+  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
   Register mdo = op->mdo()->as_register();
   __ mov_metadata(mdo, md->constant_encoding());
@@ -1559,7 +1559,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
     assert_different_registers(mdo, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
       // We know the type that will be seen at this call site; we can
       // statically update the MethodData* rather than needing to do
       // dynamic tests on the receiver type
@@ -1582,7 +1582,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
       // VirtualCallData rather than just the first time
       for (i = 0; i < VirtualCallData::row_limit(); i++) {
         ciKlass* receiver = vc_data->receiver(i);
-        if (receiver == NULL) {
+        if (receiver == nullptr) {
           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
           __ mov_metadata(t1, known_klass->constant_encoding());
           __ sd(t1, recv_addr);
@@ -1618,8 +1618,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }
 void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
                                    Register tmp, Label &next, Label &none,
                                    Address mdo_addr) {
-  if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
-    if (exact_klass != NULL) {
+  if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
+    if (exact_klass != nullptr) {
       __ mov_metadata(tmp, exact_klass->constant_encoding());
     } else {
       __ load_klass(tmp, tmp);
@@ -1650,7 +1650,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
         __ beqz(t0, next);
       }
     } else {
-      assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+      assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
             ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

       __ ld(tmp, mdo_addr);
@@ -1676,7 +1676,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
 void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                                       Address mdo_addr, Label &next) {
   // There's a single possible klass at this profile point
-  assert(exact_klass != NULL, "should be");
+  assert(exact_klass != nullptr, "should be");
   if (TypeEntries::is_type_none(current_klass)) {
     __ mov_metadata(tmp, exact_klass->constant_encoding());
     __ ld(t1, mdo_addr);
@@ -1705,7 +1705,7 @@ void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_kla
       // first time here. Set profile type.
       __ sd(tmp, mdo_addr);
     } else {
-      assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+      assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
            ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

       __ ld(tmp, mdo_addr);
@@ -1744,7 +1744,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
   Label update, next, none;

   bool do_null = !not_null;
-  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

   assert(do_null || do_update, "why are we here?");
@@ -1769,7 +1769,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {

   if (do_update) {
 #ifdef ASSERT
-    if (exact_klass != NULL) {
+    if (exact_klass != nullptr) {
       check_exact_klass(tmp, exact_klass);
     }
 #endif
@@ -1840,7 +1840,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
   assert(!tmp->is_valid(), "don't need temporary");

   CodeBlob *cb = CodeCache::find_blob(dest);
-  if (cb != NULL) {
+  if (cb != nullptr) {
     __ far_call(RuntimeAddress(dest));
   } else {
     RuntimeAddress target(dest);
@@ -1851,7 +1851,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
     });
   }

-  if (info != NULL) {
+  if (info != nullptr) {
     add_call_info_here(info);
   }
   __ post_call_nop();
@@ -1992,7 +1992,7 @@ int LIR_Assembler::array_element_size(BasicType type) const {
 // bailout case the pointer won't be to unique storage.
 address LIR_Assembler::float_constant(float f) {
   address const_addr = __ float_constant(f);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -2002,7 +2002,7 @@ address LIR_Assembler::float_constant(float f) {

 address LIR_Assembler::double_constant(double d) {
   address const_addr = __ double_constant(d);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -2012,7 +2012,7 @@ address LIR_Assembler::double_constant(double d) {

 address LIR_Assembler::int_constant(jlong n) {
   address const_addr = __ long_constant(n);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -2042,7 +2042,7 @@ void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
 }

 void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
-  address target = NULL;
+  address target = nullptr;

   switch (patching_id(info)) {
     case PatchingStub::access_field_id:
@@ -2109,8 +2109,8 @@ void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile
   CodeStub* stub = op->stub();

   // check if it needs to be profiled
-  ciMethodData* md = NULL;
-  ciProfileData* data = NULL;
+  ciMethodData* md = nullptr;
+  ciProfileData* data = nullptr;

   if (should_profile) {
     data_check(op, &md, &data);
@@ -2179,7 +2179,7 @@ void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo,
   // get instance klass (it's already uncompressed)
   __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
   // perform the fast part of the checking logic
-  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+  __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
   // call out-of-line instance of __ check_klass_subtype_slow_path(...)
   __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
   __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
@@ -2199,10 +2199,10 @@ void LIR_Assembler::const2reg_helper(LIR_Opr src) {
     case T_OBJECT:
     case T_ARRAY:
     case T_METADATA:
-      const2reg(src, FrameMap::t0_opr, lir_patch_none, NULL);
+      const2reg(src, FrameMap::t0_opr, lir_patch_none, nullptr);
       break;
     case T_LONG:
-      const2reg(src, FrameMap::t0_long_opr, lir_patch_none, NULL);
+      const2reg(src, FrameMap::t0_long_opr, lir_patch_none, nullptr);
       break;
     case T_FLOAT:
     case T_DOUBLE:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -62,7 +62,7 @@ private:
   void caswu(Register addr, Register newval, Register cmpval);
   void casl(Register addr, Register newval, Register cmpval);

-  void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL);
+  void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = nullptr);

   void deoptimize_trap(CodeEmitInfo *info);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -108,41 +108,41 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {

 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
-  if (v->type()->as_IntConstant() != NULL) {
+  if (v->type()->as_IntConstant() != nullptr) {
     return v->type()->as_IntConstant()->value() == 0;
-  } else if (v->type()->as_LongConstant() != NULL) {
+  } else if (v->type()->as_LongConstant() != nullptr) {
     return v->type()->as_LongConstant()->value() == 0;
-  } else if (v->type()->as_ObjectConstant() != NULL) {
+  } else if (v->type()->as_ObjectConstant() != nullptr) {
     return v->type()->as_ObjectConstant()->value()->is_null_object();
-  } else if (v->type()->as_FloatConstant() != NULL) {
+  } else if (v->type()->as_FloatConstant() != nullptr) {
     return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f;
-  } else if (v->type()->as_DoubleConstant() != NULL) {
+  } else if (v->type()->as_DoubleConstant() != nullptr) {
     return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0;
   }
   return false;
 }

 bool LIRGenerator::can_inline_as_constant(Value v) const {
-  if (v->type()->as_IntConstant() != NULL) {
+  if (v->type()->as_IntConstant() != nullptr) {
     int value = v->type()->as_IntConstant()->value();
     // "-value" must be defined for value may be used for sub
     return Assembler::is_simm12(value) && Assembler::is_simm12(- value);
-  } else if (v->type()->as_ObjectConstant() != NULL) {
+  } else if (v->type()->as_ObjectConstant() != nullptr) {
     return v->type()->as_ObjectConstant()->value()->is_null_object();
-  } else if (v->type()->as_LongConstant() != NULL) {
+  } else if (v->type()->as_LongConstant() != nullptr) {
     long value = v->type()->as_LongConstant()->value();
     // "-value" must be defined for value may be used for sub
     return Assembler::is_simm12(value) && Assembler::is_simm12(- value);
-  } else if (v->type()->as_FloatConstant() != NULL) {
+  } else if (v->type()->as_FloatConstant() != nullptr) {
     return v->type()->as_FloatConstant()->value() == 0.0f;
-  } else if (v->type()->as_DoubleConstant() != NULL) {
+  } else if (v->type()->as_DoubleConstant() != nullptr) {
     return v->type()->as_DoubleConstant()->value() == 0.0;
   }
   return false;
 }

 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
-  if (c->as_constant() != NULL) {
+  if (c->as_constant() != nullptr) {
     long constant = 0;
     switch (c->type()) {
       case T_INT: constant = c->as_jint(); break;
@@ -275,7 +275,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
   // "lock" stores the address of the monitor stack slot, so this is not an oop
   LIR_Opr lock = new_register(T_INT);

-  CodeEmitInfo* info_for_exception = NULL;
+  CodeEmitInfo* info_for_exception = nullptr;
   if (x->needs_null_check()) {
     info_for_exception = state_for(x);
   }
@@ -419,7 +419,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
       }
     }
     rlock_result(x);
-    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
+    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
   }
 }

@@ -464,9 +464,9 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

     LIR_Opr ill = LIR_OprFact::illegalOpr;
     if (x->op() == Bytecodes::_irem) {
-      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
+      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
     } else if (x->op() == Bytecodes::_idiv) {
-      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
+      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
     }

   } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
@@ -500,7 +500,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
   // when an operand with use count 1 is the left operand, then it is
   // likely that no move for 2-operand-LIR-form is necessary
-  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
+  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
     x->swap_operands();
   }

@@ -522,7 +522,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {

   value.load_item();
   if (count.is_constant()) {
-    assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL , "should be");
+    assert(count.type()->as_IntConstant() != nullptr || count.type()->as_LongConstant() != nullptr , "should be");
     count.dont_load_item();
   } else {
     count.load_item();
@@ -672,7 +672,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
   LIR_Opr calc_result = rlock_result(x);
   LIR_Opr result_reg = result_register_for(x->type());

-  CallingConvention* cc = NULL;
+  CallingConvention* cc = nullptr;

   if (x->id() == vmIntrinsics::_dpow) {
     LIRItem value1(x->argument_at(1), this);
@@ -694,31 +694,31 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {

   switch (x->id()) {
     case vmIntrinsics::_dexp:
-      if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dexp() != nullptr) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dlog:
-      if (StubRoutines::dlog() != NULL) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dlog() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dlog10:
-      if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dlog10() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dsin:
-      if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dsin() != nullptr) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dcos:
-      if (StubRoutines::dcos() != NULL) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dcos() != nullptr) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dtan:
-      if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dtan() != nullptr) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); }
       break;
     case vmIntrinsics::_dpow:
-      if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
+      if (StubRoutines::dpow() != nullptr) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
       else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); }
       break;
     default: ShouldNotReachHere();
@@ -762,7 +762,7 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   set_no_result(x);

   int flags;
-  ciArrayKlass* expected_type = NULL;
+  ciArrayKlass* expected_type = nullptr;
   arraycopy_helper(x, &flags, &expected_type);

   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
@@ -869,7 +869,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
   LIRItem length(x->length(), this);
   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
   // and therefore provide the state before the parameters have been consumed
-  CodeEmitInfo* patching_info = NULL;
+  CodeEmitInfo* patching_info = nullptr;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
   }
@@ -902,14 +902,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   Values* dims = x->dims();
   int i = dims->length();
-  LIRItemList* items = new LIRItemList(i, i, NULL);
+  LIRItemList* items = new LIRItemList(i, i, nullptr);
   while (i-- > 0) {
     LIRItem* size = new LIRItem(dims->at(i), this);
     items->at_put(i, size);
   }

   // Evaluate state_for early since it may emit code.
-  CodeEmitInfo* patching_info = NULL;
+  CodeEmitInfo* patching_info = nullptr;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());

@@ -956,7 +956,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {

 void LIRGenerator::do_CheckCast(CheckCast* x) {
   LIRItem obj(x->obj(), this);
-  CodeEmitInfo* patching_info = NULL;
+  CodeEmitInfo* patching_info = nullptr;
   if (!x->klass()->is_loaded() ||
       (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
     // must do this before locking the destination register as an oop register,
@@ -970,13 +970,13 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                                   (x->needs_exception_state() ? state_for(x) :
                                                                 state_for(x, x->state_before(), true /*ignore_xhandler*/ ));

-  CodeStub* stub = NULL;
+  CodeStub* stub = nullptr;
   if (x->is_incompatible_class_change_check()) {
-    assert(patching_info == NULL, "can't patch this");
+    assert(patching_info == nullptr, "can't patch this");
     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
                                    info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
-    assert(patching_info == NULL, "can't patch this");
+    assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception,
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
@@ -999,7 +999,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {

   // result and test object may not be in same register
   LIR_Opr reg = rlock_result(x);
-  CodeEmitInfo* patching_info = NULL;
+  CodeEmitInfo* patching_info = nullptr;
   if ((!x->klass()->is_loaded() || PatchALot)) {
     // must do this before locking the destination register as an oop register
     patching_info = state_for(x, x->state_before());
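
The do_LibmIntrinsic cases above all follow one pattern: call the platform-generated stub when one exists, otherwise fall back to the shared runtime implementation; that is why each stub pointer is compared against nullptr at all. A minimal sketch of the dispatch, where unary_fn and call_dexp are hypothetical names standing in for the StubRoutines::dexp() / SharedRuntime::dexp pair:

#include <cassert>
#include <cmath>

using unary_fn = double (*)(double);

// Prefer the optimized stub when it was generated; otherwise use the
// portable fallback (std::exp stands in for SharedRuntime::dexp here).
static double call_dexp(double x, unary_fn optimized_stub) {
  if (optimized_stub != nullptr) {
    return optimized_stub(x);
  }
  return std::exp(x);
}

int main() {
  // No stub generated: falls back to the shared implementation.
  assert(call_dexp(0.0, nullptr) == 1.0);
  return 0;
}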

View File

@ -80,7 +80,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// displaced header address in the object header - if it is not the same, get the // displaced header address in the object header - if it is not the same, get the
// object header instead // object header instead
la(t1, Address(obj, hdr_offset)); la(t1, Address(obj, hdr_offset));
cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthough*/NULL); cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthough*/nullptr);
// if the object header was the same, we're done // if the object header was the same, we're done
// if the object header was not the same, it is now in the hdr register // if the object header was not the same, it is now in the hdr register
// => test if it is a stack pointer into the same stack (recursive locking), i.e.: // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
@ -99,7 +99,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
mv(t0, aligned_mask - (int)os::vm_page_size()); mv(t0, aligned_mask - (int)os::vm_page_size());
andr(hdr, hdr, t0); andr(hdr, hdr, t0);
// for recursive locking, the result is zero => save it in the displaced header // for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking) // location (null in the displaced hdr location indicates recursive locking)
sd(hdr, Address(disp_hdr, 0)); sd(hdr, Address(disp_hdr, 0));
// otherwise we don't care about the result and handle locking via runtime call // otherwise we don't care about the result and handle locking via runtime call
bnez(hdr, slow_case, /* is_far */ true); bnez(hdr, slow_case, /* is_far */ true);
@ -117,7 +117,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// load displaced header // load displaced header
ld(hdr, Address(disp_hdr, 0)); ld(hdr, Address(disp_hdr, 0));
// if the loaded hdr is NULL we had recursive locking // if the loaded hdr is null we had recursive locking
// if we had recursive locking, we are done // if we had recursive locking, we are done
beqz(hdr, done); beqz(hdr, done);
// load object // load object
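The displaced-header protocol implemented by lock_object/unlock_object above reads more easily in plain C++. Below is a minimal sketch, not HotSpot code: Object, BasicLock, and the one-page stack test are simplified stand-ins for the mark word, the on-stack lock record, and the aligned-mask check done by cmpxchgptr and the andr sequence above.

#include <atomic>
#include <cstdint>

// Simplified stand-ins: the real code works on the object's mark word and a
// BasicLock slot inside the current stack frame.
struct Object    { std::atomic<uintptr_t> header; };
struct BasicLock { uintptr_t displaced_header; };

constexpr uintptr_t kPageSize = 4096;  // assumed; HotSpot asks the OS

// Fast-path lock. Returns true if this frame now owns the object.
bool fast_lock(Object* obj, BasicLock* lock, uintptr_t sp) {
  uintptr_t hdr = obj->header.load(std::memory_order_relaxed);
  lock->displaced_header = hdr;               // save the displaced header
  uintptr_t expected = hdr;
  if (obj->header.compare_exchange_strong(expected,
                                          reinterpret_cast<uintptr_t>(lock))) {
    return true;                              // installed our lock record
  }
  // CAS failed: 'expected' holds the current header. If it points into our
  // own stack (within a page above sp), this is recursive locking.
  if (expected - sp < kPageSize) {
    lock->displaced_header = 0;               // 0 marks recursive locking
    return true;
  }
  return false;                               // contended: runtime slow path
}

// Fast-path unlock: put the displaced header back unless it was recursive.
bool fast_unlock(Object* obj, BasicLock* lock) {
  if (lock->displaced_header == 0) { return true; }  // recursive: nothing to do
  uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
  return obj->header.compare_exchange_strong(expected, lock->displaced_header);
}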
@ -298,7 +298,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) { void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
verify_oop(receiver); verify_oop(receiver);
// explicit NULL check not needed since load from [klass_offset] causes a trap // explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache // check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
assert_different_registers(receiver, iCache, t0, t2); assert_different_registers(receiver, iCache, t0, t2);
@ -314,7 +314,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
// Insert nmethod entry barrier into frame. // Insert nmethod entry barrier into frame.
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
} }
void C1_MacroAssembler::remove_frame(int framesize) { void C1_MacroAssembler::remove_frame(int framesize) {
@ -398,8 +398,8 @@ static c1_float_cond_branch_insn c1_float_cond_branch[] =
(c1_float_cond_branch_insn)&MacroAssembler::float_ble, (c1_float_cond_branch_insn)&MacroAssembler::float_ble,
(c1_float_cond_branch_insn)&MacroAssembler::float_bge, (c1_float_cond_branch_insn)&MacroAssembler::float_bge,
(c1_float_cond_branch_insn)&MacroAssembler::float_bgt, (c1_float_cond_branch_insn)&MacroAssembler::float_bgt,
NULL, // lir_cond_belowEqual nullptr, // lir_cond_belowEqual
NULL, // lir_cond_aboveEqual nullptr, // lir_cond_aboveEqual
/* DOUBLE branches */ /* DOUBLE branches */
(c1_float_cond_branch_insn)&MacroAssembler::double_beq, (c1_float_cond_branch_insn)&MacroAssembler::double_beq,
@ -408,8 +408,8 @@ static c1_float_cond_branch_insn c1_float_cond_branch[] =
(c1_float_cond_branch_insn)&MacroAssembler::double_ble, (c1_float_cond_branch_insn)&MacroAssembler::double_ble,
(c1_float_cond_branch_insn)&MacroAssembler::double_bge, (c1_float_cond_branch_insn)&MacroAssembler::double_bge,
(c1_float_cond_branch_insn)&MacroAssembler::double_bgt, (c1_float_cond_branch_insn)&MacroAssembler::double_bgt,
NULL, // lir_cond_belowEqual nullptr, // lir_cond_belowEqual
NULL // lir_cond_aboveEqual nullptr // lir_cond_aboveEqual
}; };
void C1_MacroAssembler::c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label, void C1_MacroAssembler::c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label,
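The tables above map LIR condition codes to member-function pointers, with nullptr marking conditions that have no float/double branch encoding. A self-contained sketch of the same dispatch pattern follows; Emitter and the condition names are illustrative, not HotSpot's.

#include <cassert>
#include <cstdio>

struct Emitter {
  void beq() { std::puts("beq"); }
  void bgt() { std::puts("bgt"); }
  void blt() { std::puts("blt"); }
};

using branch_insn = void (Emitter::*)();

// Index = condition code; nullptr marks conditions with no encoding here.
static branch_insn branch_table[] = {
  &Emitter::beq,  // cond_equal
  &Emitter::bgt,  // cond_greater
  nullptr,        // cond_overflow: unsupported for float/double compares
  &Emitter::blt,  // cond_less
};

void emit_branch(Emitter& e, unsigned cond) {
  assert(cond < sizeof(branch_table) / sizeof(branch_table[0]) &&
         branch_table[cond] != nullptr && "must be valid encoding");
  (e.*branch_table[cond])();  // dispatch through the member-function pointer
}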


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -106,7 +106,7 @@ using MacroAssembler::null_check;
void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN; void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN;
// This platform only uses signal-based null checks. The Label is not needed. // This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); } void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
void load_parameter(int offset_in_words, Register reg); void load_parameter(int offset_in_words, Register reg);


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -219,7 +219,7 @@ StubFrame::~StubFrame() {
} else { } else {
__ should_not_reach_here(); __ should_not_reach_here();
} }
_sasm = NULL; _sasm = nullptr;
} }
#undef __ #undef __
@ -259,7 +259,7 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
OopMap* oop_map = new OopMap(frame_size_in_slots, 0); OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
assert_cond(oop_map != NULL); assert_cond(oop_map != nullptr);
// caller save registers only, see FrameMap::initialize // caller save registers only, see FrameMap::initialize
// in c1_FrameMap_riscv.cpp for detail. // in c1_FrameMap_riscv.cpp for detail.
@ -368,7 +368,7 @@ void Runtime1::initialize_pd() {
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// make a frame and preserve the caller's caller-save registers // make a frame and preserve the caller's caller-save registers
OopMap* oop_map = save_live_registers(sasm); OopMap* oop_map = save_live_registers(sasm);
assert_cond(oop_map != NULL); assert_cond(oop_map != nullptr);
int call_offset = 0; int call_offset = 0;
if (!has_argument) { if (!has_argument) {
call_offset = __ call_RT(noreg, noreg, target); call_offset = __ call_RT(noreg, noreg, target);
@ -378,7 +378,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
call_offset = __ call_RT(noreg, noreg, target); call_offset = __ call_RT(noreg, noreg, target);
} }
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
return oop_maps; return oop_maps;
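Nearly every stub in this file repeats the shape shown here: save live registers into an OopMap, call into the runtime, then record the map against the call's return offset so a GC stopping the thread at that pc can find the oops. A hedged model of that bookkeeping, with simplified stand-in types (the real OopMap/OopMapSet are far richer):

#include <map>
#include <utility>
#include <vector>

// Stand-ins: a map from the return-address offset of each runtime call to
// the frame slots that hold oops at that call.
struct OopMap { std::vector<int> oop_slots; };

struct OopMapSet {
  std::map<int, OopMap> by_call_offset;
  void add_gc_map(int call_offset, OopMap map) {
    by_call_offset[call_offset] = std::move(map);
  }
};

int emit_runtime_call() {   // pretend to emit a call; return the offset just
  static int pc = 0;        // past it, which is what call_RT reports
  return pc += 8;
}

OopMapSet* generate_stub() {
  OopMap map;               // "save_live_registers": note where oops now live
  map.oop_slots = {10, 11}; // e.g. x10 and x11 were spilled into the frame
  int call_offset = emit_runtime_call();
  auto* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, map);  // the GC looks this up by return pc
  return oop_maps;
}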
@ -392,8 +392,8 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
const Register exception_pc = x13; const Register exception_pc = x13;
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
OopMap* oop_map = NULL; OopMap* oop_map = nullptr;
switch (id) { switch (id) {
case forward_exception_id: case forward_exception_id:
@ -463,7 +463,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// compute the exception handler. // compute the exception handler.
// the exception oop and the throwing pc are read from the fields in JavaThread // the exception oop and the throwing pc are read from the fields in JavaThread
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
guarantee(oop_map != NULL, "NULL oop_map!"); guarantee(oop_map != nullptr, "null oop_map!");
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
// x10: handler address // x10: handler address
@ -561,10 +561,10 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
// Note: This number affects also the RT-Call in generate_handle_exception because // Note: This number affects also the RT-Call in generate_handle_exception because
// the oop-map is shared for all calls. // the oop-map is shared for all calls.
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
OopMap* oop_map = save_live_registers(sasm); OopMap* oop_map = save_live_registers(sasm);
assert_cond(oop_map != NULL); assert_cond(oop_map != nullptr);
__ mv(c_rarg0, xthread); __ mv(c_rarg0, xthread);
Label retaddr; Label retaddr;
@ -578,7 +578,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
}); });
__ bind(retaddr); __ bind(retaddr);
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(__ offset(), oop_map); oop_maps->add_gc_map(__ offset(), oop_map);
// verify callee-saved register // verify callee-saved register
#ifdef ASSERT #ifdef ASSERT
@ -634,7 +634,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
bool save_fpu_registers = true; bool save_fpu_registers = true;
// stub code & info for the different stubs // stub code & info for the different stubs
OopMapSet* oop_maps = NULL; OopMapSet* oop_maps = nullptr;
switch (id) { switch (id) {
{ {
case forward_exception_id: case forward_exception_id:
@ -676,10 +676,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ enter(); __ enter();
OopMap* map = save_live_registers(sasm); OopMap* map = save_live_registers(sasm);
assert_cond(map != NULL); assert_cond(map != nullptr);
int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r10(sasm); restore_live_registers_except_r10(sasm);
__ verify_oop(obj); __ verify_oop(obj);
@ -697,7 +697,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register method = x11; Register method = x11;
__ enter(); __ enter();
OopMap* map = save_live_registers(sasm); OopMap* map = save_live_registers(sasm);
assert_cond(map != NULL); assert_cond(map != nullptr);
const int bci_off = 0; const int bci_off = 0;
const int method_off = 1; const int method_off = 1;
@ -707,7 +707,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ld(method, Address(fp, method_off * BytesPerWord)); __ ld(method, Address(fp, method_off * BytesPerWord));
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm); restore_live_registers(sasm);
__ leave(); __ leave();
@ -746,7 +746,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ enter(); __ enter();
OopMap* map = save_live_registers(sasm); OopMap* map = save_live_registers(sasm);
assert_cond(map != NULL); assert_cond(map != nullptr);
int call_offset = 0; int call_offset = 0;
if (id == new_type_array_id) { if (id == new_type_array_id) {
call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
@ -755,7 +755,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
} }
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r10(sasm); restore_live_registers_except_r10(sasm);
@ -774,14 +774,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// x9: rank // x9: rank
// x12: address of 1st dimension // x12: address of 1st dimension
OopMap* map = save_live_registers(sasm); OopMap* map = save_live_registers(sasm);
assert_cond(map != NULL); assert_cond(map != nullptr);
__ mv(c_rarg1, x10); __ mv(c_rarg1, x10);
__ mv(c_rarg3, x12); __ mv(c_rarg3, x12);
__ mv(c_rarg2, x9); __ mv(c_rarg2, x9);
int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13); int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r10(sasm); restore_live_registers_except_r10(sasm);
@ -810,10 +810,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(register_finalizer); __ bind(register_finalizer);
__ enter(); __ enter();
OopMap* oop_map = save_live_registers(sasm); OopMap* oop_map = save_live_registers(sasm);
assert_cond(oop_map != NULL); assert_cond(oop_map != nullptr);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
// Now restore all the live registers // Now restore all the live registers
@ -864,7 +864,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass __ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass
Label miss; Label miss;
__ check_klass_subtype_slow_path(x14, x10, x12, x15, NULL, &miss); __ check_klass_subtype_slow_path(x14, x10, x12, x15, nullptr, &miss);
// fallthrough on success: // fallthrough on success:
__ mv(t0, 1); __ mv(t0, 1);
@ -886,7 +886,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{ {
StubFrame f(sasm, "monitorenter", dont_gc_arguments); StubFrame f(sasm, "monitorenter", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, save_fpu_registers); OopMap* map = save_live_registers(sasm, save_fpu_registers);
assert_cond(map != NULL); assert_cond(map != nullptr);
// Called with store_parameter and not C abi // Called with store_parameter and not C abi
f.load_argument(1, x10); // x10: object f.load_argument(1, x10); // x10: object
@ -895,7 +895,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm, save_fpu_registers); restore_live_registers(sasm, save_fpu_registers);
} }
@ -908,7 +908,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{ {
StubFrame f(sasm, "monitorexit", dont_gc_arguments); StubFrame f(sasm, "monitorexit", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, save_fpu_registers); OopMap* map = save_live_registers(sasm, save_fpu_registers);
assert_cond(map != NULL); assert_cond(map != nullptr);
// Called with store_parameter and not C abi // Called with store_parameter and not C abi
f.load_argument(0, x10); // x10: lock address f.load_argument(0, x10); // x10: lock address
@ -919,7 +919,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm, save_fpu_registers); restore_live_registers(sasm, save_fpu_registers);
} }
@ -929,16 +929,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{ {
StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return); StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
OopMap* oop_map = save_live_registers(sasm); OopMap* oop_map = save_live_registers(sasm);
assert_cond(oop_map != NULL); assert_cond(oop_map != nullptr);
f.load_argument(0, c_rarg1); f.load_argument(0, c_rarg1);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm); restore_live_registers(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ leave(); __ leave();
__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
} }
@ -1028,16 +1028,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return); StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
OopMap* map = save_live_registers(sasm); OopMap* map = save_live_registers(sasm);
assert_cond(map != NULL); assert_cond(map != nullptr);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet(); oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(call_offset, map); oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm); restore_live_registers(sasm);
__ leave(); __ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -36,7 +36,7 @@ int C2SafepointPollStub::max_size() const {
} }
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) { void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
assert(SharedRuntime::polling_page_return_handler_blob() != NULL, assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet"); "polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
RuntimeAddress callback_addr(stub); RuntimeAddress callback_addr(stub);


@ -551,16 +551,16 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
sub(t0, needle_len, 16); // small patterns still should be handled by simple algorithm sub(t0, needle_len, 16); // small patterns still should be handled by simple algorithm
bltz(t0, LINEARSEARCH); bltz(t0, LINEARSEARCH);
mv(result, zr); mv(result, zr);
RuntimeAddress stub = NULL; RuntimeAddress stub = nullptr;
if (isLL) { if (isLL) {
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ll()); stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ll());
assert(stub.target() != NULL, "string_indexof_linear_ll stub has not been generated"); assert(stub.target() != nullptr, "string_indexof_linear_ll stub has not been generated");
} else if (needle_isL) { } else if (needle_isL) {
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ul()); stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ul());
assert(stub.target() != NULL, "string_indexof_linear_ul stub has not been generated"); assert(stub.target() != nullptr, "string_indexof_linear_ul stub has not been generated");
} else { } else {
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_uu()); stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_uu());
assert(stub.target() != NULL, "string_indexof_linear_uu stub has not been generated"); assert(stub.target() != nullptr, "string_indexof_linear_uu stub has not been generated");
} }
address call = trampoline_call(stub); address call = trampoline_call(stub);
if (call == nullptr) { if (call == nullptr) {
@ -952,7 +952,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
} }
bind(STUB); bind(STUB);
RuntimeAddress stub = NULL; RuntimeAddress stub = nullptr;
switch (ae) { switch (ae) {
case StrIntrinsicNode::LL: case StrIntrinsicNode::LL:
stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_LL()); stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_LL());
@ -969,7 +969,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
} }
assert(stub.target() != NULL, "compare_long_string stub has not been generated"); assert(stub.target() != nullptr, "compare_long_string stub has not been generated");
address call = trampoline_call(stub); address call = trampoline_call(stub);
if (call == nullptr) { if (call == nullptr) {
DEBUG_ONLY(reset_labels(DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START)); DEBUG_ONLY(reset_labels(DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START));
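Both string_indexof and string_compare pick one of several pre-generated stub routines by character encoding and assert the stub exists before jumping to it. A sketch of that selection; the Encoding enum and the stub variables are stand-ins for the StubRoutines entries, which a real VM fills in at startup:

#include <cassert>

using address = unsigned char*;

enum class Encoding { LL, UL, UU };  // Latin1/Latin1, UTF-16/Latin1, UTF-16/UTF-16

// nullptr means "stub has not been generated".
static address compare_long_string_LL = nullptr;
static address compare_long_string_UL = nullptr;
static address compare_long_string_UU = nullptr;

address select_compare_stub(Encoding ae) {
  address stub = nullptr;
  switch (ae) {
    case Encoding::LL: stub = compare_long_string_LL; break;
    case Encoding::UL: stub = compare_long_string_UL; break;
    case Encoding::UU: stub = compare_long_string_UU; break;
  }
  assert(stub != nullptr && "compare_long_string stub has not been generated");
  return stub;
}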
@ -1211,21 +1211,21 @@ static conditional_branch_insn conditional_branches[] =
/* SHORT branches */ /* SHORT branches */
(conditional_branch_insn)&MacroAssembler::beq, (conditional_branch_insn)&MacroAssembler::beq,
(conditional_branch_insn)&MacroAssembler::bgt, (conditional_branch_insn)&MacroAssembler::bgt,
NULL, // BoolTest::overflow nullptr, // BoolTest::overflow
(conditional_branch_insn)&MacroAssembler::blt, (conditional_branch_insn)&MacroAssembler::blt,
(conditional_branch_insn)&MacroAssembler::bne, (conditional_branch_insn)&MacroAssembler::bne,
(conditional_branch_insn)&MacroAssembler::ble, (conditional_branch_insn)&MacroAssembler::ble,
NULL, // BoolTest::no_overflow nullptr, // BoolTest::no_overflow
(conditional_branch_insn)&MacroAssembler::bge, (conditional_branch_insn)&MacroAssembler::bge,
/* UNSIGNED branches */ /* UNSIGNED branches */
(conditional_branch_insn)&MacroAssembler::beq, (conditional_branch_insn)&MacroAssembler::beq,
(conditional_branch_insn)&MacroAssembler::bgtu, (conditional_branch_insn)&MacroAssembler::bgtu,
NULL, nullptr,
(conditional_branch_insn)&MacroAssembler::bltu, (conditional_branch_insn)&MacroAssembler::bltu,
(conditional_branch_insn)&MacroAssembler::bne, (conditional_branch_insn)&MacroAssembler::bne,
(conditional_branch_insn)&MacroAssembler::bleu, (conditional_branch_insn)&MacroAssembler::bleu,
NULL, nullptr,
(conditional_branch_insn)&MacroAssembler::bgeu (conditional_branch_insn)&MacroAssembler::bgeu
}; };
@ -1234,21 +1234,21 @@ static float_conditional_branch_insn float_conditional_branches[] =
/* FLOAT SHORT branches */ /* FLOAT SHORT branches */
(float_conditional_branch_insn)&MacroAssembler::float_beq, (float_conditional_branch_insn)&MacroAssembler::float_beq,
(float_conditional_branch_insn)&MacroAssembler::float_bgt, (float_conditional_branch_insn)&MacroAssembler::float_bgt,
NULL, // BoolTest::overflow nullptr, // BoolTest::overflow
(float_conditional_branch_insn)&MacroAssembler::float_blt, (float_conditional_branch_insn)&MacroAssembler::float_blt,
(float_conditional_branch_insn)&MacroAssembler::float_bne, (float_conditional_branch_insn)&MacroAssembler::float_bne,
(float_conditional_branch_insn)&MacroAssembler::float_ble, (float_conditional_branch_insn)&MacroAssembler::float_ble,
NULL, // BoolTest::no_overflow nullptr, // BoolTest::no_overflow
(float_conditional_branch_insn)&MacroAssembler::float_bge, (float_conditional_branch_insn)&MacroAssembler::float_bge,
/* DOUBLE SHORT branches */ /* DOUBLE SHORT branches */
(float_conditional_branch_insn)&MacroAssembler::double_beq, (float_conditional_branch_insn)&MacroAssembler::double_beq,
(float_conditional_branch_insn)&MacroAssembler::double_bgt, (float_conditional_branch_insn)&MacroAssembler::double_bgt,
NULL, nullptr,
(float_conditional_branch_insn)&MacroAssembler::double_blt, (float_conditional_branch_insn)&MacroAssembler::double_blt,
(float_conditional_branch_insn)&MacroAssembler::double_bne, (float_conditional_branch_insn)&MacroAssembler::double_bne,
(float_conditional_branch_insn)&MacroAssembler::double_ble, (float_conditional_branch_insn)&MacroAssembler::double_ble,
NULL, nullptr,
(float_conditional_branch_insn)&MacroAssembler::double_bge (float_conditional_branch_insn)&MacroAssembler::double_bge
}; };
@ -1661,9 +1661,9 @@ void C2_MacroAssembler::reduce_minmax_FD_v(FloatRegister dst,
} }
bool C2_MacroAssembler::in_scratch_emit_size() { bool C2_MacroAssembler::in_scratch_emit_size() {
if (ciEnv::current()->task() != NULL) { if (ciEnv::current()->task() != nullptr) {
PhaseOutput* phase_output = Compile::current()->output(); PhaseOutput* phase_output = Compile::current()->output();
if (phase_output != NULL && phase_output->in_scratch_emit_size()) { if (phase_output != nullptr && phase_output->in_scratch_emit_size()) {
return true; return true;
} }
} }


@ -44,7 +44,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// mv xmethod, 0 // mv xmethod, 0
// jalr -4 # to self // jalr -4 # to self
if (mark == NULL) { if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section. mark = cbuf.insts_mark(); // Get mark within main instrs section.
} }
@ -54,8 +54,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
address base = __ start_a_stub(to_interp_stub_size()); address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset(); int offset = __ offset();
if (base == NULL) { if (base == nullptr) {
return NULL; // CodeBuffer::expand failed return nullptr; // CodeBuffer::expand failed
} }
// static stub relocation stores the instruction address of the call // static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark)); __ relocate(static_stub_Relocation::spec(mark));
@ -86,7 +86,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(); address stub = find_stub();
guarantee(stub != NULL, "stub not found"); guarantee(stub != nullptr, "stub not found");
if (TraceICs) { if (TraceICs) {
ResourceMark rm; ResourceMark rm;
@ -114,7 +114,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub. // Reset stub.
address stub = static_stub->addr(); address stub = static_stub->addr();
assert(stub != NULL, "stub not found"); assert(stub != nullptr, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call"); assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object. // Creation also verifies the object.
NativeMovConstReg* method_holder NativeMovConstReg* method_holder
@ -135,7 +135,7 @@ void CompiledDirectStaticCall::verify() {
// Verify stub. // Verify stub.
address stub = find_stub(); address stub = find_stub();
assert(stub != NULL, "no stub found for static call"); assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object. // Creation also verifies the object.
NativeMovConstReg* method_holder NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub); = nativeMovConstReg_at(stub);


@ -147,7 +147,7 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
// so we compute locals "from scratch" rather than relativizing the value in the stack frame, which might include padding, // so we compute locals "from scratch" rather than relativizing the value in the stack frame, which might include padding,
// since we don't freeze the padding word (see recurse_freeze_interpreted_frame). // since we don't freeze the padding word (see recurse_freeze_interpreted_frame).
// at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts // at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts
*hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp(); *hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp();
// this line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 // this line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197
*hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1; *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1;
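Freezing a frame onto the heap invalidates absolute stack addresses, which is why last_sp above is rewritten as an fp-relative word offset. A minimal sketch of the relativize/derelativize pair (names are illustrative):

#include <cstdint>

// Relativize: store "how many words away from fp" instead of an absolute
// address, so the slot stays meaningful after the frame is copied to the heap.
inline void relativize_slot(intptr_t* fp, intptr_t* slot, intptr_t* value) {
  *slot = value - fp;   // a word-sized offset, not a raw pointer
}

// Derelativize: rebuild the absolute address against the frame's current fp.
inline intptr_t* derelativize_slot(intptr_t* fp, const intptr_t* slot) {
  return fp + *slot;
}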


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -40,7 +40,7 @@ static const char* pd_cpu_opts() {
// the perfect job. In those cases, decode_instruction0 may kick in // the perfect job. In those cases, decode_instruction0 may kick in
// and do it right. // and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)" // If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) { static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
return here; return here;
} }


@ -73,7 +73,7 @@ public:
_captured_state_mask(captured_state_mask), _captured_state_mask(captured_state_mask),
_frame_complete(0), _frame_complete(0),
_frame_size_slots(0), _frame_size_slots(0),
_oop_maps(NULL) { _oop_maps(nullptr) {
} }
void generate(); void generate();


@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// to construct the sender and do some validation of it. This goes a long way // to construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code // toward eliminating issues when we get in frame construction code
if (_cb != NULL) { if (_cb != nullptr) {
// First check if frame is complete and tester is reliable // First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod // Unfortunately we can only check frame complete for runtime stubs and nmethod
@ -119,10 +119,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return fp_safe && is_entry_frame_valid(thread); return fp_safe && is_entry_frame_valid(thread);
} }
intptr_t* sender_sp = NULL; intptr_t* sender_sp = nullptr;
intptr_t* sender_unextended_sp = NULL; intptr_t* sender_unextended_sp = nullptr;
address sender_pc = NULL; address sender_pc = nullptr;
intptr_t* saved_fp = NULL; intptr_t* saved_fp = nullptr;
if (is_interpreted_frame()) { if (is_interpreted_frame()) {
// fp must be safe // fp must be safe
@ -182,7 +182,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc // We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) { if (sender_pc == nullptr || sender_blob == nullptr) {
return false; return false;
} }
@ -212,7 +212,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) { if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) { nm->method()->is_method_handle_intrinsic()) {
return false; return false;
@ -250,7 +250,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
// Will the pc we fetch be non-zero (which we'll find at the oldest frame) // Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ((address)this->fp()[return_addr_offset] == NULL) { return false; } if ((address)this->fp()[return_addr_offset] == nullptr) { return false; }
return true; return true;
} }
@ -274,7 +274,7 @@ void frame::patch_pc(Thread* thread, address pc) {
*pc_addr = pc; *pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc _pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching"); assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
_pc = original_pc; _pc = original_pc;
@ -339,7 +339,7 @@ void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
} }
frame frame::sender_for_entry_frame(RegisterMap* map) const { frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C // Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender // frame of that chunk as the sender
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@ -365,11 +365,11 @@ bool frame::upcall_stub_frame_is_first() const {
assert(is_upcall_stub_frame(), "must be optimized entry frame"); assert(is_upcall_stub_frame(), "must be optimized entry frame");
UpcallStub* blob = _cb->as_upcall_stub(); UpcallStub* blob = _cb->as_upcall_stub();
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this); JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
return jfa->last_Java_sp() == NULL; return jfa->last_Java_sp() == nullptr;
} }
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
UpcallStub* blob = _cb->as_upcall_stub(); UpcallStub* blob = _cb->as_upcall_stub();
// Java frame called from C; skip all C frames and return top C // Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender // frame of that chunk as the sender
@ -400,7 +400,7 @@ void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp
// method anyway. // method anyway.
fr._unextended_sp = unextended_sp; fr._unextended_sp = unextended_sp;
assert_cond(nm != NULL); assert_cond(nm != nullptr);
address original_pc = nm->get_original_pc(&fr); address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc), assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)"); "original PC must be in the main code section of the compiled method (or must be immediately following it)");
@ -415,9 +415,9 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are // as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites. // returning to any of these call sites.
if (_cb != NULL) { if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) { if (sender_cm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC. // If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) || if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) { sender_cm->is_deopt_mh_entry(_pc)) {
@ -440,7 +440,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
intptr_t* unextended_sp = interpreter_frame_sender_sp(); intptr_t* unextended_sp = interpreter_frame_sender_sp();
#ifdef COMPILER2 #ifdef COMPILER2
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
if (map->update_map()) { if (map->update_map()) {
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset)); update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
} }
@ -460,10 +460,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const { bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame"); assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks // These are reasonable sanity checks
if (fp() == NULL || (intptr_t(fp()) & (wordSize-1)) != 0) { if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false; return false;
} }
if (sp() == NULL || (intptr_t(sp()) & (wordSize-1)) != 0) { if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false; return false;
} }
if (fp() + interpreter_frame_initial_sp_offset < sp()) { if (fp() + interpreter_frame_initial_sp_offset < sp()) {
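The validity tests above combine a null check with an alignment test: ptr & (wordSize - 1) is non-zero for any address that is not word-aligned. The same check as a tiny self-contained helper:

#include <cstdint>

constexpr uintptr_t kWordSize = sizeof(intptr_t);

// A frame pointer is plausible only if it is non-null and word-aligned;
// (p & (wordSize - 1)) != 0 catches any misaligned address.
inline bool plausible_frame_pointer(const intptr_t* p) {
  return p != nullptr &&
         (reinterpret_cast<uintptr_t>(p) & (kWordSize - 1)) == 0;
}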
@ -522,7 +522,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
Method* method = interpreter_frame_method(); Method* method = interpreter_frame_method();
BasicType type = method->result_type(); BasicType type = method->result_type();
intptr_t* tos_addr = NULL; intptr_t* tos_addr = nullptr;
if (method->is_native()) { if (method->is_native()) {
tos_addr = (intptr_t*)sp(); tos_addr = (intptr_t*)sp();
if (type == T_FLOAT || type == T_DOUBLE) { if (type == T_FLOAT || type == T_DOUBLE) {
@ -541,7 +541,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
} else { } else {
oop* obj_p = (oop*)tos_addr; oop* obj_p = (oop*)tos_addr;
obj = (obj_p == NULL) ? (oop)NULL : *obj_p; obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
} }
assert(Universe::is_in_heap_or_null(obj), "sanity check"); assert(Universe::is_in_heap_or_null(obj), "sanity check");
*oop_result = obj; *oop_result = obj;
@ -610,7 +610,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
intptr_t *frame::initial_deoptimization_info() { intptr_t *frame::initial_deoptimization_info() {
// Not used on riscv, but we must return something. // Not used on riscv, but we must return something.
return NULL; return nullptr;
} }
#undef DESCRIBE_FP_OFFSET #undef DESCRIBE_FP_OFFSET
@ -625,11 +625,11 @@ frame::frame(void* ptr_sp, void* ptr_fp, void* pc) : _on_heap(false) {
void JavaFrameAnchor::make_walkable() { void JavaFrameAnchor::make_walkable() {
// last frame set? // last frame set?
if (last_Java_sp() == NULL) { return; } if (last_Java_sp() == nullptr) { return; }
// already walkable? // already walkable?
if (walkable()) { return; } if (walkable()) { return; }
vmassert(last_Java_sp() != NULL, "not called from Java code?"); vmassert(last_Java_sp() != nullptr, "not called from Java code?");
vmassert(last_Java_pc() == NULL, "already walkable"); vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1]; _last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong"); vmassert(walkable(), "something went wrong");
} }
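make_walkable recovers the last Java pc from the word stored just below last_Java_sp, after which the anchor can be traversed by a stack walker. A stand-in model of that invariant (FrameAnchor is not the real JavaFrameAnchor, whose walkable test is more involved):

#include <cassert>
#include <cstdint>

// Stand-in for JavaFrameAnchor: sp is published when leaving Java; pc is
// filled in lazily from the return address the call left at sp[-1].
struct FrameAnchor {
  intptr_t*      last_java_sp = nullptr;
  unsigned char* last_java_pc = nullptr;

  bool walkable() const {
    return last_java_sp == nullptr || last_java_pc != nullptr;
  }

  void make_walkable() {
    if (last_java_sp == nullptr) { return; }  // no last frame set
    if (walkable()) { return; }               // already walkable
    last_java_pc = reinterpret_cast<unsigned char*>(last_java_sp[-1]);
    assert(walkable() && "something went wrong");
  }
};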


@ -39,11 +39,11 @@
// Constructors: // Constructors:
inline frame::frame() { inline frame::frame() {
_pc = NULL; _pc = nullptr;
_sp = NULL; _sp = nullptr;
_unextended_sp = NULL; _unextended_sp = nullptr;
_fp = NULL; _fp = nullptr;
_cb = NULL; _cb = nullptr;
_deopt_state = unknown; _deopt_state = unknown;
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
@ -58,11 +58,11 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
_unextended_sp = ptr_sp; _unextended_sp = ptr_sp;
_fp = ptr_fp; _fp = ptr_fp;
_pc = pc; _pc = pc;
_oop_map = NULL; _oop_map = nullptr;
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc); _cb = CodeCache::find_blob(pc);
setup(pc); setup(pc);
} }
@ -71,10 +71,10 @@ inline void frame::setup(address pc) {
adjust_unextended_sp(); adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
_pc = original_pc; _pc = original_pc;
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc), assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)"); "original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else { } else {
if (_cb == SharedRuntime::deopt_blob()) { if (_cb == SharedRuntime::deopt_blob()) {
@ -96,10 +96,10 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,
_unextended_sp = unextended_sp; _unextended_sp = unextended_sp;
_fp = ptr_fp; _fp = ptr_fp;
_pc = pc; _pc = pc;
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = cb; _cb = cb;
_oop_map = NULL; _oop_map = nullptr;
assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc)); assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
setup(pc); setup(pc);
@ -119,7 +119,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,
// In thaw, non-heap frames use this constructor to pass oop_map. I don't know why. // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
assert(_on_heap || _cb != nullptr, "these frames are always heap frames"); assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
if (cb != NULL) { if (cb != nullptr) {
setup(pc); setup(pc);
} }
#ifdef ASSERT #ifdef ASSERT
@ -138,10 +138,10 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,
_unextended_sp = unextended_sp; _unextended_sp = unextended_sp;
_fp = ptr_fp; _fp = ptr_fp;
_pc = pc; _pc = pc;
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob_fast(pc); _cb = CodeCache::find_blob_fast(pc);
assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(ptr_sp), p2i(unextended_sp), p2i(ptr_fp)); assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(ptr_sp), p2i(unextended_sp), p2i(ptr_fp));
_oop_map = NULL; _oop_map = nullptr;
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
@ -172,7 +172,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
adjust_unextended_sp(); adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
_pc = original_pc; _pc = original_pc;
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
} else { } else {
@ -192,19 +192,19 @@ inline bool frame::equal(frame other) const {
} }
// Return unique id for this frame. The id must have a value where we can distinguish // Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable) // identity and younger/older relationship. null represents an invalid (incomparable)
// frame. // frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); } inline intptr_t* frame::id(void) const { return unextended_sp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id // Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; } return this->id() > id ; }
inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); } inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline intptr_t* frame::link_or_null() const { inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset); intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL; return os::is_readable_pointer(ptr) ? *ptr : nullptr;
} }
inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; } inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
@ -213,7 +213,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); return
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; } inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }
inline intptr_t* frame::real_fp() const { inline intptr_t* frame::real_fp() const {
if (_cb != NULL) { if (_cb != nullptr) {
// use the frame size if valid // use the frame size if valid
int size = _cb->frame_size(); int size = _cb->frame_size();
if (size > 0) { if (size > 0) {
@ -237,7 +237,7 @@ inline int frame::compiled_frame_stack_argsize() const {
} }
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
assert(mask != NULL, ""); assert(mask != nullptr, "");
Method* m = interpreter_frame_method(); Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci(); int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask); m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
@ -287,7 +287,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack // top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const { inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp(); intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL) { if (last_sp == nullptr) {
return sp(); return sp();
} else { } else {
// sp() may have been extended or shrunk by an adapter. At least // sp() may have been extended or shrunk by an adapter. At least
@ -326,13 +326,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
// Compiled frames // Compiled frames
inline oop frame::saved_oop_result(RegisterMap* map) const { inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr); oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location"); guarantee(result_adr != nullptr, "bad register save location");
return (*result_adr); return (*result_adr);
} }
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr); oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location"); guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj; *result_adr = obj;
} }
@ -345,17 +345,17 @@ inline int frame::sender_sp_ret_address_offset() {
} }
inline const ImmutableOopMap* frame::get_oop_map() const { inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL; if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != NULL) { if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc); NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) { if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff); int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc); return _cb->oop_map_for_slot(slot, _pc);
} }
const ImmutableOopMap* oop_map = OopMapSet::find_map(this); const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map; return oop_map;
} }
return NULL; return nullptr;
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
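get_oop_map above decodes the oop-map slot from the top byte of the post-call nop's 32-bit displacement. The bit arithmetic in isolation, assuming the same 8/24 split; make_displacement is a hypothetical inverse added only for illustration:

#include <cstdint>

// The top 8 bits of the displacement carry the oop-map slot; a displacement
// of 0 means "nothing cached, search the oop maps instead".
inline int oop_map_slot_from_displacement(int32_t displacement) {
  return (displacement >> 24) & 0xff;
}

inline int32_t make_displacement(int slot, uint32_t low24) {
  return int32_t(((uint32_t(slot) & 0xffu) << 24) | (low24 & 0xffffffu));
}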
@ -375,7 +375,7 @@ frame frame::sender(RegisterMap* map) const {
frame frame::sender_raw(RegisterMap* map) const { frame frame::sender_raw(RegisterMap* map) const {
// Default is we don't have to follow them. The sender_for_xxx will // Default is we don't have to follow them. The sender_for_xxx will
// update it accordingly // update it accordingly
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
map->set_include_argument_oops(false); map->set_include_argument_oops(false);
if (map->in_cont()) { // already in an h-stack if (map->in_cont()) { // already in an h-stack
@ -393,7 +393,7 @@ frame frame::sender_raw(RegisterMap* map) const {
} }
assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) { if (_cb != nullptr) {
return sender_for_compiled_frame(map); return sender_for_compiled_frame(map);
} }
@ -421,20 +421,20 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp + frame::link_offset); intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp + frame::link_offset);
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
if (map->update_map()) { if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it. // Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag // For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map. // outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != NULL) { if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map); _oop_map->update_register_map(this, map);
} }
} else { } else {
assert(!_cb->caller_must_gc_arguments(map->thread()), ""); assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), ""); assert(!map->include_argument_oops(), "");
assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame"); assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
} }
// Since the prolog does the save and restore of FP there is no // Since the prolog does the save and restore of FP there is no
@ -460,7 +460,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// frame::update_map_with_saved_link // frame::update_map_with_saved_link
template <typename RegisterMapT> template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) { void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
// The interpreter and compiler(s) always save FP in a known // The interpreter and compiler(s) always save FP in a known
// location on entry. C2-compiled code uses FP as an allocatable // location on entry. C2-compiled code uses FP as an allocatable
// callee-saved register. We must record where that location is so // callee-saved register. We must record where that location is so


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -198,11 +198,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ srli(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes); __ srli(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ beqz(tmp1, done); __ beqz(tmp1, done);
// crosses regions, storing NULL? // crosses regions, storing null?
__ beqz(new_val, done); __ beqz(new_val, done);
// storing region crossing non-NULL, is card already dirty? // storing region crossing non-null, is card already dirty?
ExternalAddress cardtable((address) ct->byte_map_base()); ExternalAddress cardtable((address) ct->byte_map_base());
const Register card_addr = tmp1; const Register card_addr = tmp1;
@ -223,7 +223,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ lbu(tmp2, Address(card_addr)); __ lbu(tmp2, Address(card_addr));
__ beqz(tmp2, done); __ beqz(tmp2, done);
// storing a region crossing, non-NULL oop, card is clean. // storing a region crossing, non-null oop, card is clean.
// dirty card and log. // dirty card and log.
__ sb(zr, Address(card_addr)); __ sb(zr, Address(card_addr));
@ -415,7 +415,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Label done; Label done;
Label runtime; Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions. // At this point we know new_value is non-null and the new_value crosses regions.
// Must check to see if card is already dirty // Must check to see if card is already dirty
const Register thread = xthread; const Register thread = xthread;
@ -446,7 +446,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ lbu(t0, Address(card_addr, 0)); __ lbu(t0, Address(card_addr, 0));
__ beqz(t0, done); __ beqz(t0, done);
// storing region crossing non-NULL, card is clean. // storing region crossing non-null, card is clean.
// dirty card and log. // dirty card and log.
__ sb(zr, Address(card_addr, 0)); __ sb(zr, Address(card_addr, 0));
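The two hunks above only touch comment wording, but the filtering order they describe is the whole point of the G1 post barrier. Below is a minimal C++ sketch of that logic; kLogOfHRGrainBytes, kCardShift, kDirtyCard and the toy card_table are all assumptions for the sketch, not HotSpot's values. It shows the same three early-outs the assembly takes before dirtying a card.

#include <cstdint>

// Assumed constants; real values come from HeapRegion and G1CardTable.
constexpr unsigned kLogOfHRGrainBytes = 20;              // pretend 1 MiB regions
constexpr unsigned kCardShift         = 9;               // pretend 512-byte cards
constexpr uint8_t  kDirtyCard         = 0;               // beqz above => dirty == 0
static uint8_t card_table[1u << 20];                     // toy card table

void g1_post_barrier_sketch(void** field, void* new_val) {
  // Same region? If the xor has no bits at or above the region shift, the
  // store cannot create a cross-region reference.
  if ((((uintptr_t)field ^ (uintptr_t)new_val) >> kLogOfHRGrainBytes) == 0) return;
  // Storing null never needs remembered-set work.
  if (new_val == nullptr) return;
  // Region-crossing, non-null store: is the card already dirty?
  uint8_t* card = &card_table[((uintptr_t)field >> kCardShift) & ((1u << 20) - 1)];
  if (*card == kDirtyCard) return;                       // already dirty: done
  *card = kDirtyCard;                                    // dirty card; real code also logs it
}

The cheap checks deliberately come first: most stores stay within one region or store null, so the card table is rarely touched.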


@ -240,7 +240,7 @@ void BarrierSetAssembler::clear_patching_epoch() {
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) { void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) { if (bs_nm == nullptr) {
return; return;
} }
@ -249,7 +249,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
Label local_guard; Label local_guard;
NMethodPatchingType patching_type = nmethod_patching_type(); NMethodPatchingType patching_type = nmethod_patching_type();
if (slow_path == NULL) { if (slow_path == nullptr) {
guard = &local_guard; guard = &local_guard;
// RISCV atomic operations require that the memory address be naturally aligned. // RISCV atomic operations require that the memory address be naturally aligned.
@ -304,7 +304,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
ShouldNotReachHere(); ShouldNotReachHere();
} }
if (slow_path == NULL) { if (slow_path == nullptr) {
Label skip_barrier; Label skip_barrier;
__ beq(t0, t1, skip_barrier); __ beq(t0, t1, skip_barrier);
@ -327,7 +327,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) { if (bs == nullptr) {
return; return;
} }
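For orientation, the nmethod entry barrier these stubs emit reduces to one guarded comparison. A hedged C++ sketch follows; nmethod_guard, vm_disarm_value and entry_barrier_slow_path are invented stand-ins — the real state lives in BarrierSetNMethod, and the guard word is patched by the VM, which is why the stub above cares about patching types and natural alignment.

#include <atomic>

// Invented stand-ins: a per-nmethod guard word and the VM-wide disarm value.
static std::atomic<int> nmethod_guard{0};
static std::atomic<int> vm_disarm_value{0};

static void entry_barrier_slow_path() {
  // Assumed runtime upcall: fix up the nmethod's oops, then disarm it.
  nmethod_guard.store(vm_disarm_value.load(), std::memory_order_release);
}

void nmethod_entry_sketch() {
  // Arming bumps the disarm value, so an armed nmethod's guard mismatches.
  if (nmethod_guard.load(std::memory_order_acquire) !=
      vm_disarm_value.load(std::memory_order_relaxed)) {
    entry_barrier_slow_path();
  }
  // continuation: the compiled method body runs here.
}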


@ -158,7 +158,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
// Calling the runtime using the regular call_VM_leaf mechanism generates // Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base) // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL. // that checks that the *(rfp+frame::interpreter_frame_last_sp) is null.
// //
// If we are generating the pre-barrier without a frame (e.g. in the // If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to // intrinsified Reference.get() routine) then ebp might be pointing to
@ -282,7 +282,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
} }
__ push_call_clobbered_registers(); __ push_call_clobbered_registers();
address target = NULL; address target = nullptr;
if (is_strong) { if (is_strong) {
if (is_narrow) { if (is_narrow) {
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow); target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
@ -467,7 +467,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
// b) A parallel thread may heal the contents of addr, replacing a // b) A parallel thread may heal the contents of addr, replacing a
// from-space pointer held in addr with the to-space pointer // from-space pointer held in addr with the to-space pointer
// representing the new location of the object. // representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL // Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of // or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that // from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space. // is being evacuated out of from-space.
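The comment block above is the heart of the algorithm: a failed CAS can be a false negative when a concurrent thread heals the slot between the load and the exchange. A one-shot C++ sketch of the retry, under stated assumptions: resolve_forwarded() is a hypothetical stand-in for from-space resolution (identity here so the sketch runs), and the production code loops rather than retrying once and also handles narrow oops and memory ordering.

#include <atomic>

// Hypothetical stand-in; the real routine returns the to-space copy of a
// possibly forwarded object.
static void* resolve_forwarded(void* p) { return p; }

bool cmpxchg_oop_sketch(std::atomic<void*>& addr, void* expected, void* new_val) {
  void* witness = expected;
  if (addr.compare_exchange_strong(witness, new_val)) {
    return true;                                   // slot held expected: success
  }
  // The failure is a false negative if the slot held a stale from-space
  // pointer naming the same object as 'expected'.
  if (resolve_forwarded(witness) != resolve_forwarded(expected)) {
    return false;                                  // different object: real failure
  }
  addr.compare_exchange_strong(witness, expected); // heal the slot, then retry
  witness = expected;
  return addr.compare_exchange_strong(witness, new_val);
}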
@ -680,7 +680,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators); bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators); bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
bool is_native = ShenandoahBarrierSet::is_native_access(decorators); bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
address target = NULL; address target = nullptr;
if (is_strong) { if (is_strong) {
if (is_native) { if (is_native) {
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong); target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);


@ -34,7 +34,7 @@
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI); define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -43,7 +43,7 @@ int InlineCacheBuffer::ic_stub_code_size() {
#define __ masm-> #define __ masm->
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) { void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
assert_cond(code_begin != NULL && entry_point != NULL); assert_cond(code_begin != nullptr && entry_point != nullptr);
ResourceMark rm; ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size()); CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code); MacroAssembler* masm = new MacroAssembler(&code);


@ -85,7 +85,7 @@ void InterpreterMacroAssembler::narrow(Register result) {
} }
void InterpreterMacroAssembler::jump_to_entry(address entry) { void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry != NULL, "Entry must have been generated by now"); assert(entry != nullptr, "Entry must have been generated by now");
j(entry); j(entry);
} }
@ -156,7 +156,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
if (JvmtiExport::can_force_early_return()) { if (JvmtiExport::can_force_early_return()) {
Label L; Label L;
ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset())); ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
beqz(t0, L); // if [thread->jvmti_thread_state() == NULL] then exit beqz(t0, L); // if thread->jvmti_thread_state() is null then exit
// Initiate earlyret handling only if it is not already being processed. // Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code // If the flag has the earlyret_processing bit set, it means that this code
@ -819,7 +819,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
assert(lock_offset == 0, assert(lock_offset == 0,
"displached header must be first word in BasicObjectLock"); "displached header must be first word in BasicObjectLock");
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL); cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
// Test if the oopMark is an obvious stack pointer, i.e., // Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 7) == 0, and // 1) (mark & 7) == 0, and
@ -899,7 +899,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
beqz(header_reg, count); beqz(header_reg, count);
// Atomic swap back the old header // Atomic swap back the old header
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/NULL); cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
// Call the runtime routine for slow case. // Call the runtime routine for slow case.
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@ -930,7 +930,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Label set_mdp; Label set_mdp;
push_reg(RegSet::of(x10, x11), sp); // save x10, x11 push_reg(RegSet::of(x10, x11), sp); // save x10, x11
// Test MDO to avoid the call if it is NULL. // Test MDO to avoid the call if it is null.
ld(x10, Address(xmethod, in_bytes(Method::method_data_offset()))); ld(x10, Address(xmethod, in_bytes(Method::method_data_offset())));
beqz(x10, set_mdp); beqz(x10, set_mdp);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp); call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp);
@ -1301,7 +1301,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(
} }
// In the fall-through case, we found no matching item, but we // In the fall-through case, we found no matching item, but we
// observed the item[start_row] is NULL. // observed the item[start_row] is null.
// Fill in the item field and increment the count. // Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row)); int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item); set_mdp_data_at(mdp, item_offset, item);
@ -1319,19 +1319,19 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(
// row[0].incr() // row[0].incr()
// goto done // goto done
// ] // ]
// if (row[0].rec != NULL) then [ // if (row[0].rec != nullptr) then [
// # inner copy of decision tree, rooted at row[1] // # inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) then [ // if (row[1].rec == rec) then [
// row[1].incr() // row[1].incr()
// goto done // goto done
// ] // ]
// if (row[1].rec != NULL) then [ // if (row[1].rec != nullptr) then [
// # degenerate decision tree, rooted at row[2] // # degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) then [ // if (row[2].rec == rec) then [
// row[2].incr() // row[2].incr()
// goto done // goto done
// ] // ]
// if (row[2].rec != NULL) then [ // if (row[2].rec != nullptr) then [
// count.incr() // count.incr()
// goto done // goto done
// ] # overflow // ] # overflow
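The unrolled pseudocode above is equivalent to a short loop over the rows. A compact C++ rendering with toy types — ProfileRow and record_item_sketch are illustrations, not MethodData's actual layout:

#include <cstdint>

struct ProfileRow { void* rec; uint64_t count; };        // toy stand-in for MDO rows

// Same decision tree as the comment, expressed as a loop: bump a matching
// row, claim the first empty row, or overflow into the shared counter.
void record_item_sketch(ProfileRow* row, int rows, void* rec, uint64_t& overflow_count) {
  for (int i = 0; i < rows; i++) {
    if (row[i].rec == rec) { row[i].count++; return; }   // row[i].incr(); goto done
    if (row[i].rec == nullptr) {                         // the fall-through case above
      row[i].rec = rec;                                  // fill in the item field
      row[i].count = 1;                                  // and increment the count
      return;
    }
  }
  overflow_count++;                                      // count.incr(); overflow
}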
@ -1613,7 +1613,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
beqz(t0, L); beqz(t0, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL"); " last_sp isn't null");
bind(L); bind(L);
} }
#endif /* ASSERT */ #endif /* ASSERT */
@ -1640,7 +1640,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
beqz(t0, L); beqz(t0, L);
stop("InterpreterMacroAssembler::call_VM_base:" stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL"); " last_sp isn't null");
bind(L); bind(L);
} }
#endif /* ASSERT */ #endif /* ASSERT */


@ -164,7 +164,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void empty_expression_stack() { void empty_expression_stack() {
ld(esp, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); ld(esp, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call // null last_sp until next java call
sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -227,7 +227,7 @@ class SlowSignatureHandler
virtual void pass_object() { virtual void pass_object() {
intptr_t* addr = single_slot_addr(); intptr_t* addr = single_slot_addr();
intptr_t value = *addr == 0 ? NULL : (intptr_t)addr; intptr_t value = *addr == 0 ? (intptr_t)nullptr : (intptr_t)addr;
if (pass_gpr(value) < 0) { if (pass_gpr(value) < 0) {
pass_stack(value); pass_stack(value);
} }
@ -269,11 +269,11 @@ class SlowSignatureHandler
~SlowSignatureHandler() ~SlowSignatureHandler()
{ {
_from = NULL; _from = nullptr;
_to = NULL; _to = nullptr;
_int_args = NULL; _int_args = nullptr;
_fp_args = NULL; _fp_args = nullptr;
_fp_identifiers = NULL; _fp_identifiers = nullptr;
} }
}; };


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -40,10 +40,10 @@ public:
void clear(void) { void clear(void) {
// clearing _last_Java_sp must be first // clearing _last_Java_sp must be first
_last_Java_sp = NULL; _last_Java_sp = nullptr;
OrderAccess::release(); OrderAccess::release();
_last_Java_fp = NULL; _last_Java_fp = nullptr;
_last_Java_pc = NULL; _last_Java_pc = nullptr;
} }
void copy(JavaFrameAnchor* src) { void copy(JavaFrameAnchor* src) {
@ -51,12 +51,12 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data // We must clear _last_Java_sp before copying the rest of the new data
// //
// Hack Alert: Temporary bugfix for 4717480/4721647 // Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp // To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing // unless the value is changing
// //
assert(src != NULL, "Src should not be NULL."); assert(src != nullptr, "Src should not be null.");
if (_last_Java_sp != src->_last_Java_sp) { if (_last_Java_sp != src->_last_Java_sp) {
_last_Java_sp = NULL; _last_Java_sp = nullptr;
OrderAccess::release(); OrderAccess::release();
} }
_last_Java_fp = src->_last_Java_fp; _last_Java_fp = src->_last_Java_fp;
@ -65,7 +65,7 @@ public:
_last_Java_sp = src->_last_Java_sp; _last_Java_sp = src->_last_Java_sp;
} }
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; }
void make_walkable(); void make_walkable();
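The discipline encoded in clear() and copy() — retire _last_Java_sp first, fence, then touch the other fields, and republish sp last — is easier to see with std::atomic. A toy sketch, not the HotSpot type (HotSpot uses OrderAccess::release() over a plain pointer field):

#include <atomic>

// Toy anchor: sp is the published word; fp and pc are only meaningful to an
// observer while sp is non-null.
struct AnchorSketch {
  std::atomic<intptr_t*> sp{nullptr};
  intptr_t* fp = nullptr;
  intptr_t* pc = nullptr;

  void clear() {
    sp.store(nullptr, std::memory_order_relaxed);        // unpublish first
    std::atomic_thread_fence(std::memory_order_release); // ...and keep it first
    fp = nullptr;
    pc = nullptr;
  }

  void copy(const AnchorSketch& src) {
    intptr_t* new_sp = src.sp.load(std::memory_order_relaxed);
    if (sp.load(std::memory_order_relaxed) != new_sp) {
      sp.store(nullptr, std::memory_order_relaxed);      // unpublish before mutating
      std::atomic_thread_fence(std::memory_order_release);
    }
    fp = src.fp;
    pc = src.pc;
    sp.store(new_sp, std::memory_order_release);         // republish last
  }

  // HotSpot's walkable() also checks pc; sp alone keeps the sketch race-free.
  bool walkable() const { return sp.load(std::memory_order_acquire) != nullptr; }
};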


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -65,7 +65,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
name = NULL; // unreachable name = nullptr; // unreachable
} }
ResourceMark rm; ResourceMark rm;
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
@ -112,7 +112,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// Both robj and t0 are clobbered by try_resolve_jobject_in_native. // Both robj and t0 are clobbered by try_resolve_jobject_in_native.
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
assert_cond(bs != NULL); assert_cond(bs != nullptr);
bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, t0, slow); bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, t0, slow);
__ srli(roffset, c_rarg2, 2); // offset __ srli(roffset, c_rarg2, 2); // offset
@ -168,7 +168,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
slow_case_addr = NULL; // unreachable slow_case_addr = nullptr; // unreachable
} }
{ {


@ -255,7 +255,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp, Register last_java_fp,
address last_java_pc, address last_java_pc,
Register tmp) { Register tmp) {
assert(last_java_pc != NULL, "must provide a valid PC"); assert(last_java_pc != nullptr, "must provide a valid PC");
la(tmp, last_java_pc); la(tmp, last_java_pc);
sd(tmp, Address(xthread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); sd(tmp, Address(xthread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
@ -364,13 +364,13 @@ void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thr
} }
void MacroAssembler::clinit_barrier(Register klass, Register tmp, Label* L_fast_path, Label* L_slow_path) { void MacroAssembler::clinit_barrier(Register klass, Register tmp, Label* L_fast_path, Label* L_slow_path) {
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required"); assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
assert_different_registers(klass, xthread, tmp); assert_different_registers(klass, xthread, tmp);
Label L_fallthrough, L_tmp; Label L_fallthrough, L_tmp;
if (L_fast_path == NULL) { if (L_fast_path == nullptr) {
L_fast_path = &L_fallthrough; L_fast_path = &L_fallthrough;
} else if (L_slow_path == NULL) { } else if (L_slow_path == nullptr) {
L_slow_path = &L_fallthrough; L_slow_path = &L_fallthrough;
} }
@ -397,7 +397,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
if (!VerifyOops) { return; } if (!VerifyOops) { return; }
// Pass register number to verify_oop_subroutine // Pass register number to verify_oop_subroutine
const char* b = NULL; const char* b = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -436,7 +436,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
return; return;
} }
const char* b = NULL; const char* b = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -560,7 +560,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
assert_different_registers(value, tmp1, tmp2); assert_different_registers(value, tmp1, tmp2);
Label done, tagged, weak_tagged; Label done, tagged, weak_tagged;
beqz(value, done); // Use NULL as-is. beqz(value, done); // Use null as-is.
// Test for tag. // Test for tag.
andi(t0, value, JNIHandles::tag_mask); andi(t0, value, JNIHandles::tag_mask);
bnez(t0, tagged); bnez(t0, tagged);
@ -593,7 +593,7 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis
assert_different_registers(value, tmp1, tmp2); assert_different_registers(value, tmp1, tmp2);
Label done; Label done;
beqz(value, done); // Use NULL as-is. beqz(value, done); // Use null as-is.
#ifdef ASSERT #ifdef ASSERT
{ {
@ -620,7 +620,7 @@ void MacroAssembler::stop(const char* msg) {
} }
void MacroAssembler::unimplemented(const char* what) { void MacroAssembler::unimplemented(const char* what) {
const char* buf = NULL; const char* buf = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -635,7 +635,7 @@ void MacroAssembler::emit_static_call_stub() {
// CompiledDirectStaticCall::set_to_interpreted knows the // CompiledDirectStaticCall::set_to_interpreted knows the
// exact layout of this stub. // exact layout of this stub.
mov_metadata(xmethod, (Metadata*)NULL); mov_metadata(xmethod, (Metadata*)nullptr);
// Jump to the entry point of the c2i stub. // Jump to the entry point of the c2i stub.
int32_t offset = 0; int32_t offset = 0;
@ -648,7 +648,7 @@ void MacroAssembler::call_VM_leaf_base(address entry_point,
Label *retaddr) { Label *retaddr) {
push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp
call(entry_point); call(entry_point);
if (retaddr != NULL) { if (retaddr != nullptr) {
bind(*retaddr); bind(*retaddr);
} }
pop_reg(RegSet::of(t0, xmethod), sp); // pop << t0 & xmethod >> from sp pop_reg(RegSet::of(t0, xmethod), sp); // pop << t0 & xmethod >> from sp
@ -833,7 +833,7 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
#define INSN(NAME, REGISTER) \ #define INSN(NAME, REGISTER) \
void MacroAssembler::NAME(const address dest, Register temp) { \ void MacroAssembler::NAME(const address dest, Register temp) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm21(distance) && ((distance % 2) == 0)) { \ if (is_simm21(distance) && ((distance % 2) == 0)) { \
Assembler::jal(REGISTER, distance); \ Assembler::jal(REGISTER, distance); \
@ -877,7 +877,7 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
#define INSN(NAME) \ #define INSN(NAME) \
void MacroAssembler::NAME(Register Rd, const address dest, Register temp) { \ void MacroAssembler::NAME(Register Rd, const address dest, Register temp) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm21(distance) && ((distance % 2) == 0)) { \ if (is_simm21(distance) && ((distance % 2) == 0)) { \
Assembler::NAME(Rd, distance); \ Assembler::NAME(Rd, distance); \
@ -1415,7 +1415,7 @@ int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
} }
static long get_offset_of_jal(address insn_addr) { static long get_offset_of_jal(address insn_addr) {
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
long offset = 0; long offset = 0;
unsigned insn = *(unsigned*)insn_addr; unsigned insn = *(unsigned*)insn_addr;
long val = (long)Assembler::sextract(insn, 31, 12); long val = (long)Assembler::sextract(insn, 31, 12);
@ -1429,7 +1429,7 @@ static long get_offset_of_jal(address insn_addr) {
static long get_offset_of_conditional_branch(address insn_addr) { static long get_offset_of_conditional_branch(address insn_addr) {
long offset = 0; long offset = 0;
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
unsigned insn = *(unsigned*)insn_addr; unsigned insn = *(unsigned*)insn_addr;
offset = (long)Assembler::sextract(insn, 31, 31); offset = (long)Assembler::sextract(insn, 31, 31);
offset = (offset << 12) | (((long)(Assembler::sextract(insn, 7, 7) & 0x1)) << 11); offset = (offset << 12) | (((long)(Assembler::sextract(insn, 7, 7) & 0x1)) << 11);
@ -1441,7 +1441,7 @@ static long get_offset_of_conditional_branch(address insn_addr) {
static long get_offset_of_pc_relative(address insn_addr) { static long get_offset_of_pc_relative(address insn_addr) {
long offset = 0; long offset = 0;
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
offset = ((long)(Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12))) << 12; // Auipc. offset = ((long)(Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12))) << 12; // Auipc.
offset += ((long)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addi/Jalr/Load. offset += ((long)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addi/Jalr/Load.
offset = (offset << 32) >> 32; offset = (offset << 32) >> 32;
@ -1449,7 +1449,7 @@ static long get_offset_of_pc_relative(address insn_addr) {
} }
static address get_target_of_movptr(address insn_addr) { static address get_target_of_movptr(address insn_addr) {
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 29; // Lui. intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 29; // Lui.
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 17; // Addi. target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 17; // Addi.
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 6; // Addi. target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 6; // Addi.
@ -1458,7 +1458,7 @@ static address get_target_of_movptr(address insn_addr) {
} }
static address get_target_of_li64(address insn_addr) { static address get_target_of_li64(address insn_addr) {
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 44; // Lui. intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 44; // Lui.
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 32; // Addi. target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 32; // Addi.
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 20; // Addi. target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 20; // Addi.
@ -1468,7 +1468,7 @@ static address get_target_of_li64(address insn_addr) {
} }
address MacroAssembler::get_target_of_li32(address insn_addr) { address MacroAssembler::get_target_of_li32(address insn_addr) {
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 12; // Lui. intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 12; // Lui.
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addiw. target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addiw.
return (address)target_address; return (address)target_address;
@ -1477,7 +1477,7 @@ address MacroAssembler::get_target_of_li32(address insn_addr) {
// Patch any kind of instruction; there may be several instructions. // Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions. // Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) { int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
assert_cond(branch != NULL); assert_cond(branch != nullptr);
int64_t offset = target - branch; int64_t offset = target - branch;
if (NativeInstruction::is_jal_at(branch)) { // jal if (NativeInstruction::is_jal_at(branch)) { // jal
return patch_offset_in_jal(branch, offset); return patch_offset_in_jal(branch, offset);
@ -1505,7 +1505,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
address MacroAssembler::target_addr_for_insn(address insn_addr) { address MacroAssembler::target_addr_for_insn(address insn_addr) {
long offset = 0; long offset = 0;
assert_cond(insn_addr != NULL); assert_cond(insn_addr != nullptr);
if (NativeInstruction::is_jal_at(insn_addr)) { // jal if (NativeInstruction::is_jal_at(insn_addr)) { // jal
offset = get_offset_of_jal(insn_addr); offset = get_offset_of_jal(insn_addr);
} else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne } else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
@ -1879,7 +1879,7 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1
assert_different_registers(oop, trial_klass, tmp1, tmp2); assert_different_registers(oop, trial_klass, tmp1, tmp2);
if (UseCompressedClassPointers) { if (UseCompressedClassPointers) {
lwu(tmp1, Address(oop, oopDesc::klass_offset_in_bytes())); lwu(tmp1, Address(oop, oopDesc::klass_offset_in_bytes()));
if (CompressedKlassPointers::base() == NULL) { if (CompressedKlassPointers::base() == nullptr) {
slli(tmp1, tmp1, CompressedKlassPointers::shift()); slli(tmp1, tmp1, CompressedKlassPointers::shift());
beq(trial_klass, tmp1, L); beq(trial_klass, tmp1, L);
return; return;
@ -1894,7 +1894,7 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1
// Move an oop into a register. // Move an oop into a register.
void MacroAssembler::movoop(Register dst, jobject obj) { void MacroAssembler::movoop(Register dst, jobject obj) {
int oop_index; int oop_index;
if (obj == NULL) { if (obj == nullptr) {
oop_index = oop_recorder()->allocate_oop_index(obj); oop_index = oop_recorder()->allocate_oop_index(obj);
} else { } else {
#ifdef ASSERT #ifdef ASSERT
@ -1918,7 +1918,7 @@ void MacroAssembler::movoop(Register dst, jobject obj) {
// Move a metadata address into a register. // Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
int oop_index; int oop_index;
if (obj == NULL) { if (obj == nullptr) {
oop_index = oop_recorder()->allocate_metadata_index(obj); oop_index = oop_recorder()->allocate_metadata_index(obj);
} else { } else {
oop_index = oop_recorder()->find_index(obj); oop_index = oop_recorder()->find_index(obj);
@ -1974,7 +1974,7 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value
SkipIfEqual::~SkipIfEqual() { SkipIfEqual::~SkipIfEqual() {
_masm->bind(_label); _masm->bind(_label);
_masm = NULL; _masm = nullptr;
} }
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) { void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
@ -2023,13 +2023,13 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
void MacroAssembler::null_check(Register reg, int offset) { void MacroAssembler::null_check(Register reg, int offset) {
if (needs_explicit_null_check(offset)) { if (needs_explicit_null_check(offset)) {
// provoke OS NULL exception if reg = NULL by // provoke OS null exception if reg is null by
// accessing M[reg] w/o changing any registers // accessing M[reg] w/o changing any registers
// NOTE: this is plenty to provoke a segv // NOTE: this is plenty to provoke a segv
ld(zr, Address(reg, 0)); ld(zr, Address(reg, 0));
} else { } else {
// nothing to do, (later) access of M[reg + offset] // nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL // will provoke OS null exception if reg is null
} }
} }
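The trade in null_check() is worth spelling out: if the later access is M[reg + offset] with a small offset, a null reg still faults inside the OS guard page, so emitting nothing is safe; only offsets that could escape the protected page need the eager ld(zr, Address(reg, 0)) probe. A sketch of the decision, with the page size as an assumption:

#include <cstddef>

constexpr size_t kOsPageSize = 4096;                     // assumed guard-page size

// Mirrors the decision above: an access at M[nullptr + offset] still lands in
// the protected first page for small offsets, so no probe is emitted; only
// offsets outside that window require an explicit check.
inline bool needs_explicit_null_check_sketch(ptrdiff_t offset) {
  return offset < 0 || (size_t)offset >= kOsPageSize;
}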
@ -2049,7 +2049,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
// Algorithm must match CompressedOops::encode. // Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) { void MacroAssembler::encode_heap_oop(Register d, Register s) {
verify_oop_msg(s, "broken oop in encode_heap_oop"); verify_oop_msg(s, "broken oop in encode_heap_oop");
if (CompressedOops::base() == NULL) { if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
srli(d, s, LogMinObjAlignmentInBytes); srli(d, s, LogMinObjAlignmentInBytes);
@ -2106,7 +2106,7 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) { void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers"); assert(UseCompressedClassPointers, "should only be used for compressed headers");
if (CompressedKlassPointers::base() == NULL) { if (CompressedKlassPointers::base() == nullptr) {
if (CompressedKlassPointers::shift() != 0) { if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
slli(dst, src, LogKlassAlignmentInBytes); slli(dst, src, LogKlassAlignmentInBytes);
@ -2141,7 +2141,7 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) { void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers"); assert(UseCompressedClassPointers, "should only be used for compressed headers");
if (CompressedKlassPointers::base() == NULL) { if (CompressedKlassPointers::base() == nullptr) {
if (CompressedKlassPointers::shift() != 0) { if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
srli(dst, src, LogKlassAlignmentInBytes); srli(dst, src, LogKlassAlignmentInBytes);
@ -2177,24 +2177,24 @@ void MacroAssembler::decode_heap_oop_not_null(Register r) {
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
assert(UseCompressedOops, "should only be used for compressed headers"); assert(UseCompressedOops, "should only be used for compressed headers");
assert(Universe::heap() != NULL, "java heap should be initialized"); assert(Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
slli(dst, src, LogMinObjAlignmentInBytes); slli(dst, src, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) { if (CompressedOops::base() != nullptr) {
add(dst, xheapbase, dst); add(dst, xheapbase, dst);
} }
} else { } else {
assert(CompressedOops::base() == NULL, "sanity"); assert(CompressedOops::base() == nullptr, "sanity");
mv(dst, src); mv(dst, src);
} }
} }
void MacroAssembler::decode_heap_oop(Register d, Register s) { void MacroAssembler::decode_heap_oop(Register d, Register s) {
if (CompressedOops::base() == NULL) { if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0 || d != s) { if (CompressedOops::shift() != 0 || d != s) {
slli(d, s, CompressedOops::shift()); slli(d, s, CompressedOops::shift());
} }
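Both decode paths above are instances of the standard compressed-oops arithmetic, decode(n) = base + (n << shift), which degenerates to a bare shift when base is null and to a plain move when the shift is also zero. A scalar C++ sketch with hypothetical heap parameters:

#include <cstdint>

// Hypothetical heap parameters; HotSpot chooses them at startup. base == 0
// models the zero-based mode the branches above special-case.
static uintptr_t heap_base = 0;
static unsigned  oop_shift = 3;                          // LogMinObjAlignmentInBytes

uint32_t encode_heap_oop_sketch(uintptr_t oop) {
  if (oop == 0) return 0;                                // null encodes to 0
  return (uint32_t)((oop - heap_base) >> oop_shift);
}

uintptr_t decode_heap_oop_sketch(uint32_t narrow) {
  if (narrow == 0) return 0;                             // preserve null
  return heap_base + ((uintptr_t)narrow << oop_shift);
}

The _not_null variants simply drop the narrow == 0 test, which is why they compile down to a bare shift-and-add.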
@ -2223,7 +2223,7 @@ void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register
access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL, dst, src, tmp1, tmp2); access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL, dst, src, tmp1, tmp2);
} }
// Used for storing NULLs. // Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) { void MacroAssembler::store_heap_oop_null(Address dst) {
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
} }
@ -2366,7 +2366,7 @@ void MacroAssembler::membar(uint32_t order_constraint) {
address prev = pc() - NativeMembar::instruction_size; address prev = pc() - NativeMembar::instruction_size;
address last = code()->last_insn(); address last = code()->last_insn();
if (last != NULL && nativeInstruction_at(last)->is_membar() && prev == last) { if (last != nullptr && nativeInstruction_at(last)->is_membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev); NativeMembar *bar = NativeMembar_at(prev);
// We are merging two memory barrier instructions. On RISCV we // We are merging two memory barrier instructions. On RISCV we
// can do this simply by ORing them together. // can do this simply by ORing them together.
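Merging by ORing is sound because the membar operand is a bitmask of ordering constraints, and the union of two masks is at least as strong as either barrier alone. A toy illustration — the enum encoding is an assumption for the sketch:

#include <cstdint>

// Assumed encoding for the sketch: one bit per ordering constraint.
enum : uint32_t { LoadLoad = 1, LoadStore = 2, StoreLoad = 4, StoreStore = 8 };

// Two adjacent barriers collapse into one whose mask is the OR of both; this
// is what the peephole does to the previously emitted membar instruction.
uint32_t merge_membars_sketch(uint32_t first, uint32_t second) {
  return first | second;
}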
@ -2405,8 +2405,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register tmp_reg, Register tmp_reg,
Label& L_success) { Label& L_success) {
Label L_failure; Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, tmp_reg, &L_success, &L_failure, NULL); check_klass_subtype_fast_path(sub_klass, super_klass, tmp_reg, &L_success, &L_failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass, tmp_reg, noreg, &L_success, NULL); check_klass_subtype_slow_path(sub_klass, super_klass, tmp_reg, noreg, &L_success, nullptr);
bind(L_failure); bind(L_failure);
} }
@ -2443,7 +2443,7 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
bind(nope); bind(nope);
membar(AnyAny); membar(AnyAny);
mv(oldv, tmp); mv(oldv, tmp);
if (fail != NULL) { if (fail != nullptr) {
j(*fail); j(*fail);
} }
} }
@ -2707,7 +2707,7 @@ ATOMIC_XCHGU(xchgalwu, xchgalw)
void MacroAssembler::far_jump(Address entry, Register tmp) { void MacroAssembler::far_jump(Address entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range"); assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != NULL, assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache"); "destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type || entry.rspec().type() == relocInfo::runtime_call_type
@ -2728,7 +2728,7 @@ void MacroAssembler::far_jump(Address entry, Register tmp) {
void MacroAssembler::far_call(Address entry, Register tmp) { void MacroAssembler::far_call(Address entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range"); assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != NULL, assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache"); "destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type || entry.rspec().type() == relocInfo::runtime_call_type
@ -2764,10 +2764,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label L_fallthrough; Label L_fallthrough;
int label_nulls = 0; int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in batch"); assert(label_nulls <= 1, "at most one null in batch");
int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset()); int sco_offset = in_bytes(Klass::super_check_offset_offset());
@ -2850,10 +2850,10 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
Label L_fallthrough; Label L_fallthrough;
int label_nulls = 0; int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one null in the batch");
// A couple of useful fields in sub_klass: // A couple of useful fields in sub_klass:
int ss_offset = in_bytes(Klass::secondary_supers_offset()); int ss_offset = in_bytes(Klass::secondary_supers_offset());
@ -3056,8 +3056,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
{ {
ThreadInVMfromUnknown tiv; ThreadInVMfromUnknown tiv;
assert (UseCompressedOops, "should only be used for compressed oops"); assert (UseCompressedOops, "should only be used for compressed oops");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
} }
#endif #endif
@ -3070,7 +3070,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k); int index = oop_recorder()->find_index(k);
assert(!Universe::heap()->is_in(k), "should not be an oop"); assert(!Universe::heap()->is_in(k), "should not be an oop");
@ -3099,9 +3099,9 @@ address MacroAssembler::trampoline_call(Address entry) {
code()->share_trampoline_for(entry.target(), offset()); code()->share_trampoline_for(entry.target(), offset());
} else { } else {
address stub = emit_trampoline_stub(offset(), target); address stub = emit_trampoline_stub(offset(), target);
if (stub == NULL) { if (stub == nullptr) {
postcond(pc() == badAddress); postcond(pc() == badAddress);
return NULL; // CodeCache is full return nullptr; // CodeCache is full
} }
} }
} }
@ -3126,7 +3126,7 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
IncompressibleRegion ir(this); // relocations IncompressibleRegion ir(this); // relocations
movptr(t1, (address)Universe::non_oop_word()); movptr(t1, (address)Universe::non_oop_word());
assert_cond(entry != NULL); assert_cond(entry != nullptr);
return trampoline_call(Address(entry, rh)); return trampoline_call(Address(entry, rh));
} }
@ -3145,8 +3145,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
address dest) { address dest) {
// Max stub size: alignment nop, TrampolineStub. // Max stub size: alignment nop, TrampolineStub.
address stub = start_a_stub(max_trampoline_stub_size()); address stub = start_a_stub(max_trampoline_stub_size());
if (stub == NULL) { if (stub == nullptr) {
return NULL; // CodeBuffer::expand failed return nullptr; // CodeBuffer::expand failed
} }
// We are always 4-byte aligned here. // We are always 4-byte aligned here.
@ -3895,13 +3895,13 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
bltu(cnt, t0, around); bltu(cnt, t0, around);
{ {
RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::riscv::zero_blocks()); RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::riscv::zero_blocks());
assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated"); assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
if (StubRoutines::riscv::complete()) { if (StubRoutines::riscv::complete()) {
address tpc = trampoline_call(zero_blocks); address tpc = trampoline_call(zero_blocks);
if (tpc == NULL) { if (tpc == nullptr) {
DEBUG_ONLY(reset_labels(around)); DEBUG_ONLY(reset_labels(around));
postcond(pc() == badAddress); postcond(pc() == badAddress);
return NULL; return nullptr;
} }
} else { } else {
jal(zero_blocks); jal(zero_blocks);
@ -4319,12 +4319,12 @@ void MacroAssembler::object_move(OopMap* map,
VMRegPair dst, VMRegPair dst,
bool is_receiver, bool is_receiver,
int* receiver_offset) { int* receiver_offset) {
assert_cond(map != NULL && receiver_offset != NULL); assert_cond(map != nullptr && receiver_offset != nullptr);
// must pass a handle. First figure out the location we use as a handle // must pass a handle. First figure out the location we use as a handle
Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register(); Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register();
// See if oop is NULL; if it is we need no handle // See if oop is null; if it is we need no handle
if (src.first()->is_stack()) { if (src.first()->is_stack()) {
// Oop is already on the stack as an argument // Oop is already on the stack as an argument
@ -4336,7 +4336,7 @@ void MacroAssembler::object_move(OopMap* map,
ld(t0, Address(fp, reg2offset_in(src.first()))); ld(t0, Address(fp, reg2offset_in(src.first())));
la(rHandle, Address(fp, reg2offset_in(src.first()))); la(rHandle, Address(fp, reg2offset_in(src.first())));
// conditionally move a NULL // conditionally move a null
Label notZero1; Label notZero1;
bnez(t0, notZero1); bnez(t0, notZero1);
mv(rHandle, zr); mv(rHandle, zr);
@ -4344,7 +4344,7 @@ void MacroAssembler::object_move(OopMap* map,
} else { } else {
// Oop is in a register we must store it to the space we reserve // Oop is in a register we must store it to the space we reserve
// on the stack for oop_handles and pass a handle if oop is non-NULL // on the stack for oop_handles and pass a handle if oop is non-null
const Register rOop = src.first()->as_Register(); const Register rOop = src.first()->as_Register();
int oop_slot = -1; int oop_slot = -1;
@ -4371,7 +4371,7 @@ void MacroAssembler::object_move(OopMap* map,
int offset = oop_slot * VMRegImpl::stack_slot_size; int offset = oop_slot * VMRegImpl::stack_slot_size;
map->set_oop(VMRegImpl::stack2reg(oop_slot)); map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL // Store oop in handle area, may be null
sd(rOop, Address(sp, offset)); sd(rOop, Address(sp, offset));
if (is_receiver) { if (is_receiver) {
*receiver_offset = offset; *receiver_offset = offset;


@ -154,7 +154,7 @@ class MacroAssembler: public Assembler {
virtual void call_VM_leaf_base( virtual void call_VM_leaf_base(
address entry_point, // the entry point address entry_point, // the entry point
int number_of_arguments, // the number of arguments to pop after the call int number_of_arguments, // the number of arguments to pop after the call
Label* retaddr = NULL Label* retaddr = nullptr
); );
virtual void call_VM_leaf_base( virtual void call_VM_leaf_base(
@ -218,19 +218,19 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register dst, Register src); void store_klass_gap(Register dst, Register src);
// currently unimplemented // currently unimplemented
// Used for storing NULL. All other oop constants should be // Used for storing null. All other oop constants should be
// stored using routines that take a jobject. // stored using routines that take a jobject.
void store_heap_oop_null(Address dst); void store_heap_oop_null(Address dst);
// This dummy is to prevent a call to store_heap_oop from // This dummy is to prevent a call to store_heap_oop from
// converting a zero (linked NULL) into a Register by giving // converting a zero (linked null) into a Register by giving
// the compiler two choices it can't resolve // the compiler two choices it can't resolve
void store_heap_oop(Address dst, void* dummy); void store_heap_oop(Address dst, void* dummy);
// Support for NULL-checks // Support for null-checks
// //
// Generates code that causes a NULL OS exception if the content of reg is NULL. // Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the // If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain // offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size). // range (0 <= offset <= page_size).
@ -291,7 +291,7 @@ class MacroAssembler: public Assembler {
// Test sub_klass against super_klass, with fast and slow paths. // Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow. // The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through. // One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass. // If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except tmp_reg // No registers are killed, except tmp_reg
void check_klass_subtype_fast_path(Register sub_klass, void check_klass_subtype_fast_path(Register sub_klass,
@ -394,7 +394,7 @@ class MacroAssembler: public Assembler {
// Required platform-specific helpers for Label::patch_instructions. // Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined. // They _shadow_ the declarations in AbstractAssembler, which are undefined.
static int pd_patch_instruction_size(address branch, address target); static int pd_patch_instruction_size(address branch, address target);
static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) { static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
pd_patch_instruction_size(branch, target); pd_patch_instruction_size(branch, target);
} }
static address pd_call_destination(address branch) { static address pd_call_destination(address branch) {
@ -589,7 +589,7 @@ class MacroAssembler: public Assembler {
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(Register Rs1, Register Rs2, const address dest) { \ void NAME(Register Rs1, Register Rs2, const address dest) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t offset = dest - pc(); \ int64_t offset = dest - pc(); \
guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \ guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \
Assembler::NAME(Rs1, Rs2, offset); \ Assembler::NAME(Rs1, Rs2, offset); \
@ -779,7 +779,7 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(Register Rd, address dest) { \ void NAME(Register Rd, address dest) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \ if (is_simm32(distance)) { \
auipc(Rd, (int32_t)distance + 0x800); \ auipc(Rd, (int32_t)distance + 0x800); \
@ -836,7 +836,7 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(FloatRegister Rd, address dest, Register temp = t0) { \ void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \ if (is_simm32(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \ auipc(temp, (int32_t)distance + 0x800); \
@ -896,7 +896,7 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(Register Rs, address dest, Register temp = t0) { \ void NAME(Register Rs, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
assert_different_registers(Rs, temp); \ assert_different_registers(Rs, temp); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \ if (is_simm32(distance)) { \
@ -942,7 +942,7 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(FloatRegister Rs, address dest, Register temp = t0) { \ void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \ if (is_simm32(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \ auipc(temp, (int32_t)distance + 0x800); \
@ -1138,7 +1138,7 @@ public:
// - relocInfo::static_call_type // - relocInfo::static_call_type
// - relocInfo::virtual_call_type // - relocInfo::virtual_call_type
// //
// Return: the call PC or NULL if CodeCache is full. // Return: the call PC or null if CodeCache is full.
address trampoline_call(Address entry); address trampoline_call(Address entry);
address ic_call(address entry, jint method_index = 0); address ic_call(address entry, jint method_index = 0);
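A hedged caller-side sketch of the null return: generators test the result and abandon the compilation when stub allocation fails. The bailout shown is typical of C2-style callers; treat the surrounding `__` context and the ciEnv call as assumptions, not part of this header.

address call = __ trampoline_call(Address(entry, relocInfo::static_call_type));
if (call == nullptr) {
  // Trampoline stub allocation failed: the CodeCache is full, so bail
  // out rather than emit a half-formed call.
  ciEnv::current()->record_failure("CodeCache is full");
  return;
}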
@ -1158,7 +1158,7 @@ public:
void cmpptr(Register src1, Address src2, Label& equal); void cmpptr(Register src1, Address src2, Label& equal);
void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = NULL, Label* L_slow_path = NULL); void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr);
void load_method_holder_cld(Register result, Register method); void load_method_holder_cld(Register result, Register method);
void load_method_holder(Register holder, Register method); void load_method_holder(Register holder, Register method);
@ -1332,7 +1332,7 @@ public:
void rt_call(address dest, Register tmp = t0); void rt_call(address dest, Register tmp = t0);
void call(const address dest, Register temp = t0) { void call(const address dest, Register temp = t0) {
assert_cond(dest != NULL); assert_cond(dest != nullptr);
assert(temp != noreg, "expecting a register"); assert(temp != noreg, "expecting a register");
int32_t offset = 0; int32_t offset = 0;
mv(temp, dest, offset); mv(temp, dest, offset);
View File
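Why a null base makes ConN+DecodeN cheap: in zero-based ("simple") compressed-oops mode, decoding is a single shift with no base register to add. A minimal sketch; the struct and field names are illustrative, not the real CompressedOops interface.

#include <cstdint>
#include <cstdio>

struct Heap { uintptr_t base; unsigned shift; };

uintptr_t decode(const Heap& h, uint32_t narrow) {
  // With h.base == 0 this collapses to a single slli on RISC-V.
  return h.base + ((uintptr_t)narrow << h.shift);
}

int main() {
  Heap zero_based{0, 3};
  printf("%#lx\n", (unsigned long)decode(zero_based, 0x100));  // 0x800
  return 0;
}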
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -93,12 +93,12 @@
static bool const_oop_prefer_decode() { static bool const_oop_prefer_decode() {
// Prefer ConN+DecodeN over ConP in simple compressed oops mode. // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
return CompressedOops::base() == NULL; return CompressedOops::base() == nullptr;
} }
static bool const_klass_prefer_decode() { static bool const_klass_prefer_decode() {
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode. // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
return CompressedKlassPointers::base() == NULL; return CompressedKlassPointers::base() == nullptr;
} }
// Is it better to copy float constants, or load them directly from // Is it better to copy float constants, or load them directly from
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -173,14 +173,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument. // They all allow an appendix argument.
__ ebreak(); // empty stubs make SG sick __ ebreak(); // empty stubs make SG sick
return NULL; return nullptr;
} }
// No need in interpreter entry for linkToNative for now. // No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c. // Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) { if (iid == vmIntrinsics::_linkToNative) {
__ ebreak(); __ ebreak();
return NULL; return nullptr;
} }
// x19_sender_sp: sender SP (must preserve; see prepare_to_jump_from_interpreted) // x19_sender_sp: sender SP (must preserve; see prepare_to_jump_from_interpreted)
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -41,27 +41,27 @@
#endif #endif
Register NativeInstruction::extract_rs1(address instr) { Register NativeInstruction::extract_rs1(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15)); return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
} }
Register NativeInstruction::extract_rs2(address instr) { Register NativeInstruction::extract_rs2(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20)); return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
} }
Register NativeInstruction::extract_rd(address instr) { Register NativeInstruction::extract_rd(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7)); return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
} }
uint32_t NativeInstruction::extract_opcode(address instr) { uint32_t NativeInstruction::extract_opcode(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return Assembler::extract(((unsigned*)instr)[0], 6, 0); return Assembler::extract(((unsigned*)instr)[0], 6, 0);
} }
uint32_t NativeInstruction::extract_funct3(address instr) { uint32_t NativeInstruction::extract_funct3(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return Assembler::extract(((unsigned*)instr)[0], 14, 12); return Assembler::extract(((unsigned*)instr)[0], 14, 12);
} }
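These extractors all pull fixed bit fields out of a 32-bit instruction word. A standalone model, using a hand-assembled `add x5, x6, x7` as the sample word (an assumption chosen to show the field positions, not taken from this commit):

#include <cstdint>
#include <cstdio>

// Pull the bit field [msb:lsb] out of a RISC-V instruction word.
static uint32_t extract(uint32_t insn, unsigned msb, unsigned lsb) {
  unsigned nbits = msb - lsb + 1;
  return (insn >> lsb) & ((nbits == 32) ? ~0u : ((1u << nbits) - 1));
}

int main() {
  uint32_t add_x5_x6_x7 = 0x007302b3;                          // add x5, x6, x7
  printf("opcode = 0x%02x\n", extract(add_x5_x6_x7, 6, 0));    // 0x33 (OP)
  printf("rd     = x%u\n",    extract(add_x5_x6_x7, 11, 7));   // x5
  printf("rs1    = x%u\n",    extract(add_x5_x6_x7, 19, 15));  // x6
  printf("rs2    = x%u\n",    extract(add_x5_x6_x7, 24, 20));  // x7
  return 0;
}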
@ -128,7 +128,7 @@ address NativeCall::destination() const {
CodeBlob* cb = CodeCache::find_blob(addr); CodeBlob* cb = CodeCache::find_blob(addr);
assert(cb && cb->is_nmethod(), "sanity"); assert(cb && cb->is_nmethod(), "sanity");
nmethod *nm = (nmethod *)cb; nmethod *nm = (nmethod *)cb;
if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { if (nm != nullptr && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
// Yes we do, so get the destination from the trampoline stub. // Yes we do, so get the destination from the trampoline stub.
const address trampoline_stub_addr = destination; const address trampoline_stub_addr = destination;
destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination(); destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
@ -157,7 +157,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
// Patch the constant in the call's trampoline stub. // Patch the constant in the call's trampoline stub.
address trampoline_stub_addr = get_trampoline(); address trampoline_stub_addr = get_trampoline();
if (trampoline_stub_addr != NULL) { if (trampoline_stub_addr != nullptr) {
assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines"); assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest); nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
} }
@ -166,7 +166,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
if (Assembler::reachable_from_branch_at(addr_call, dest)) { if (Assembler::reachable_from_branch_at(addr_call, dest)) {
set_destination(dest); set_destination(dest);
} else { } else {
assert (trampoline_stub_addr != NULL, "we need a trampoline"); assert (trampoline_stub_addr != nullptr, "we need a trampoline");
set_destination(trampoline_stub_addr); set_destination(trampoline_stub_addr);
} }
@ -177,18 +177,18 @@ address NativeCall::get_trampoline() {
address call_addr = addr_at(0); address call_addr = addr_at(0);
CodeBlob *code = CodeCache::find_blob(call_addr); CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != NULL, "Could not find the containing code blob"); assert(code != nullptr, "Could not find the containing code blob");
address jal_destination = MacroAssembler::pd_call_destination(call_addr); address jal_destination = MacroAssembler::pd_call_destination(call_addr);
if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) { if (code != nullptr && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
return jal_destination; return jal_destination;
} }
if (code != NULL && code->is_nmethod()) { if (code != nullptr && code->is_nmethod()) {
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
} }
return NULL; return nullptr;
} }
// Inserts a native call instruction at a given pc // Inserts a native call instruction at a given pc
@ -226,7 +226,7 @@ void NativeMovConstReg::set_data(intptr_t x) {
// instruction in oops section. // instruction in oops section.
CodeBlob* cb = CodeCache::find_blob(instruction_address()); CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb->as_nmethod_or_null(); nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) { if (nm != nullptr) {
RelocIterator iter(nm, instruction_address(), next_instruction_address()); RelocIterator iter(nm, instruction_address(), next_instruction_address());
while (iter.next()) { while (iter.next()) {
if (iter.type() == relocInfo::oop_type) { if (iter.type() == relocInfo::oop_type) {
@ -329,7 +329,7 @@ bool NativeInstruction::is_safepoint_poll() {
} }
bool NativeInstruction::is_lwu_to_zr(address instr) { bool NativeInstruction::is_lwu_to_zr(address instr) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0000011 && return (extract_opcode(instr) == 0b0000011 &&
extract_funct3(instr) == 0b110 && extract_funct3(instr) == 0b110 &&
extract_rd(instr) == zr); // zr extract_rd(instr) == zr); // zr
@ -342,7 +342,7 @@ bool NativeInstruction::is_sigill_not_entrant() {
} }
void NativeIllegalInstruction::insert(address code_pos) { void NativeIllegalInstruction::insert(address code_pos) {
assert_cond(code_pos != NULL); assert_cond(code_pos != nullptr);
*(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction *(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -68,21 +68,21 @@ class NativeInstruction {
bool is_call() const { return is_call_at(addr_at(0)); } bool is_call() const { return is_call_at(addr_at(0)); }
bool is_jump() const { return is_jump_at(addr_at(0)); } bool is_jump() const { return is_jump_at(addr_at(0)); }
static bool is_jal_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1101111; } static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
static bool is_jalr_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; } static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
static bool is_branch_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100011; } static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
static bool is_ld_at(address instr) { assert_cond(instr != NULL); return is_load_at(instr) && extract_funct3(instr) == 0b011; } static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
static bool is_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000011; } static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000111; } static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
static bool is_auipc_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010111; } static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
static bool is_jump_at(address instr) { assert_cond(instr != NULL); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); } static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
static bool is_addi_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; } static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; } static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != NULL); return is_addiw_at(instr) && extract_rd(instr) == zr; } static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
static bool is_lui_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0110111; } static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != NULL); return is_lui_at(instr) && extract_rd(instr) == zr; } static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
static bool is_slli_shift_at(address instr, uint32_t shift) { static bool is_slli_shift_at(address instr, uint32_t shift) {
assert_cond(instr != NULL); assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0010011 && // opcode field return (extract_opcode(instr) == 0b0010011 && // opcode field
extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift); // shamt field Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift); // shamt field
@ -313,14 +313,14 @@ class NativeCall: public NativeInstruction {
}; };
inline NativeCall* nativeCall_at(address addr) { inline NativeCall* nativeCall_at(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset); NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset);
DEBUG_ONLY(call->verify()); DEBUG_ONLY(call->verify());
return call; return call;
} }
inline NativeCall* nativeCall_before(address return_address) { inline NativeCall* nativeCall_before(address return_address) {
assert_cond(return_address != NULL); assert_cond(return_address != nullptr);
NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
DEBUG_ONLY(call->verify()); DEBUG_ONLY(call->verify());
return call; return call;
@ -357,7 +357,7 @@ class NativeMovConstReg: public NativeInstruction {
return addr_at(load_pc_relative_instruction_size); return addr_at(load_pc_relative_instruction_size);
} }
guarantee(false, "Unknown instruction in NativeMovConstReg"); guarantee(false, "Unknown instruction in NativeMovConstReg");
return NULL; return nullptr;
} }
intptr_t data() const; intptr_t data() const;
@ -378,14 +378,14 @@ class NativeMovConstReg: public NativeInstruction {
}; };
inline NativeMovConstReg* nativeMovConstReg_at(address addr) { inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset); NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
DEBUG_ONLY(test->verify()); DEBUG_ONLY(test->verify());
return test; return test;
} }
inline NativeMovConstReg* nativeMovConstReg_before(address addr) { inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
DEBUG_ONLY(test->verify()); DEBUG_ONLY(test->verify());
return test; return test;
@ -477,7 +477,7 @@ public:
}; };
inline NativeGeneralJump* nativeGeneralJump_at(address addr) { inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
NativeGeneralJump* jump = (NativeGeneralJump*)(addr); NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
debug_only(jump->verify();) debug_only(jump->verify();)
return jump; return jump;
@ -508,7 +508,7 @@ class NativeCallTrampolineStub : public NativeInstruction {
data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr
}; };
address destination(nmethod *nm = NULL) const; address destination(nmethod *nm = nullptr) const;
void set_destination(address new_destination); void set_destination(address new_destination);
ptrdiff_t destination_offset() const; ptrdiff_t destination_offset() const;
}; };
@ -523,7 +523,7 @@ inline bool is_NativeCallTrampolineStub_at(address addr) {
// 1). check the instructions: auipc + ld + jalr // 1). check the instructions: auipc + ld + jalr
// 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 and jr[19:15] == t0 // 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 and jr[19:15] == t0
// 3). check if the offset in ld[31:20] equals the data_offset // 3). check if the offset in ld[31:20] equals the data_offset
assert_cond(addr != NULL); assert_cond(addr != nullptr);
const int instr_size = NativeInstruction::instruction_size; const int instr_size = NativeInstruction::instruction_size;
if (NativeInstruction::is_auipc_at(addr) && if (NativeInstruction::is_auipc_at(addr) &&
NativeInstruction::is_ld_at(addr + instr_size) && NativeInstruction::is_ld_at(addr + instr_size) &&
@ -539,7 +539,7 @@ inline bool is_NativeCallTrampolineStub_at(address addr) {
} }
inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) { inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found"); assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
return (NativeCallTrampolineStub*)addr; return (NativeCallTrampolineStub*)addr;
} }
@ -551,7 +551,7 @@ public:
}; };
inline NativeMembar *NativeMembar_at(address addr) { inline NativeMembar *NativeMembar_at(address addr) {
assert_cond(addr != NULL); assert_cond(addr != nullptr);
assert(nativeInstruction_at(addr)->is_membar(), "no membar found"); assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
return (NativeMembar*)addr; return (NativeMembar*)addr;
} }
@ -581,7 +581,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) { if (nop->check()) {
return nop; return nop;
} }
return NULL; return nullptr;
} }
inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) { inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
@ -603,7 +603,7 @@ class NativeDeoptInstruction: public NativeInstruction {
void verify(); void verify();
static bool is_deopt_at(address instr) { static bool is_deopt_at(address instr) {
assert(instr != NULL, ""); assert(instr != nullptr, "");
uint32_t value = *(uint32_t *) instr; uint32_t value = *(uint32_t *) instr;
// 0xc0201073 encodes CSRRW x0, instret, x0 // 0xc0201073 encodes CSRRW x0, instret, x0
return value == 0xc0201073; return value == 0xc0201073;
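The magic constant can be reproduced from the CSRRW encoding (SYSTEM opcode 0b1110011, funct3 0b001 = CSRRW, csr 0xc02 = instret, rd = rs1 = x0). A standalone check:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t csr = 0xc02, rs1 = 0, funct3 = 0b001, rd = 0, opcode = 0b1110011;
  // Assemble CSRRW x0, instret, x0 field by field.
  uint32_t insn = (csr << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | opcode;
  printf("0x%08x\n", insn);  // prints 0xc0201073
  return insn == 0xc0201073 ? 0 : 1;
}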
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -34,10 +34,10 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
VectorRegister::max_slots_per_register; VectorRegister::max_slots_per_register;
intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size; intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
address base_location = location(base_reg, nullptr); address base_location = location(base_reg, nullptr);
if (base_location != NULL) { if (base_location != nullptr) {
return base_location + offset_in_bytes; return base_location + offset_in_bytes;
} else { } else {
return NULL; return nullptr;
} }
} else { } else {
return location(base_reg->next(slot_idx), nullptr); return location(base_reg->next(slot_idx), nullptr);
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -32,7 +32,7 @@
private: private:
// This is the hook for finding a register in a "well-known" location, // This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format. // such as a register block of a predetermined format.
address pd_location(VMReg reg) const { return NULL; } address pd_location(VMReg reg) const { return nullptr; }
address pd_location(VMReg base_reg, int slot_idx) const; address pd_location(VMReg base_reg, int slot_idx) const;
// no PD state to clear or copy: // no PD state to clear or copy:
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -41,7 +41,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
switch (type()) { switch (type()) {
case relocInfo::oop_type: { case relocInfo::oop_type: {
oop_Relocation *reloc = (oop_Relocation *)this; oop_Relocation *reloc = (oop_Relocation *)this;
// in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL // in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() isn't null
if (NativeInstruction::is_load_pc_relative_at(addr())) { if (NativeInstruction::is_load_pc_relative_at(addr())) {
address constptr = (address)code()->oop_addr_at(reloc->oop_index()); address constptr = (address)code()->oop_addr_at(reloc->oop_index());
bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr); bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr);
@ -62,11 +62,11 @@ address Relocation::pd_call_destination(address orig_addr) {
assert(is_call(), "should be an address instruction here"); assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) { if (NativeCall::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline(); address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != NULL) { if (trampoline != nullptr) {
return nativeCallTrampolineStub_at(trampoline)->destination(); return nativeCallTrampolineStub_at(trampoline)->destination();
} }
} }
if (orig_addr != NULL) { if (orig_addr != nullptr) {
// the extracted address from the instructions in address orig_addr // the extracted address from the instructions in address orig_addr
address new_addr = MacroAssembler::pd_call_destination(orig_addr); address new_addr = MacroAssembler::pd_call_destination(orig_addr);
// If call is branch to self, don't try to relocate it, just leave it // If call is branch to self, don't try to relocate it, just leave it
@ -83,7 +83,7 @@ void Relocation::pd_set_call_destination(address x) {
assert(is_call(), "should be an address instruction here"); assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) { if (NativeCall::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline(); address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != NULL) { if (trampoline != nullptr) {
nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false); nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
return; return;
} }
View File
@ -179,7 +179,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
OopMap* oop_map = new OopMap(frame_size_in_slots, 0); OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
assert_cond(oop_maps != NULL && oop_map != NULL); assert_cond(oop_maps != nullptr && oop_map != nullptr);
int sp_offset_in_slots = 0; int sp_offset_in_slots = 0;
int step_in_slots = 0; int step_in_slots = 0;
@ -659,7 +659,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc(); address c2i_entry = __ pc();
// Class initialization barrier for static methods // Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL; address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) { if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier; Label L_skip_barrier;
@ -696,7 +696,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, VMRegPair *regs,
VMRegPair *regs2, VMRegPair *regs2,
int total_args_passed) { int total_args_passed) {
assert(regs2 == NULL, "not needed on riscv"); assert(regs2 == nullptr, "not needed on riscv");
// We return the amount of VMRegImpl stack slots we need to reserve for all // We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots. // the arguments NOT counting out_preserve_stack_slots.
@ -1310,14 +1310,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word, stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1), in_ByteSize(-1),
in_ByteSize(-1), in_ByteSize(-1),
(OopMapSet*)NULL); (OopMapSet*)nullptr);
} }
address native_func = method->native_function(); address native_func = method->native_function();
assert(native_func != NULL, "must have function"); assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static) // An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
intptr_t start = (intptr_t)__ pc(); intptr_t start = (intptr_t)__ pc();
// We have received a description of where all the java arg are located // We have received a description of where all the java arg are located
@ -1331,7 +1331,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL; BasicType* in_elem_bt = nullptr;
int argc = 0; int argc = 0;
out_sig_bt[argc++] = T_ADDRESS; out_sig_bt[argc++] = T_ADDRESS;
@ -1345,7 +1345,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space // Now figure out where the args must be stored and how much stack space
// they require. // they require.
int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in // Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers // incoming registers
@ -1469,8 +1469,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sub(sp, sp, stack_size - 2 * wordSize); __ sub(sp, sp, stack_size - 2 * wordSize);
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
assert_cond(bs != NULL); assert_cond(bs != nullptr);
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
// Frame is now completed as far as size and linkage. // Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start; int frame_complete = ((intptr_t)__ pc()) - start;
@ -1507,7 +1507,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// caller. // caller.
// //
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
assert_cond(map != NULL); assert_cond(map != nullptr);
int float_args = 0; int float_args = 0;
int int_args = 0; int int_args = 0;
@ -1680,7 +1680,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sd(swap_reg, Address(lock_reg, mark_word_offset)); __ sd(swap_reg, Address(lock_reg, mark_word_offset));
// src -> dest if dest == x10 else x10 <- dest // src -> dest if dest == x10 else x10 <- dest
__ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL); __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
// Test if the oopMark is an obvious stack pointer, i.e., // Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 3) == 0, and // 1) (mark & 3) == 0, and
@ -2026,7 +2026,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps); oop_maps);
assert(nm != NULL, "create native nmethod fail!"); assert(nm != nullptr, "create native nmethod fail!");
return nm; return nm;
} }
@ -2057,9 +2057,9 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2048 + pad, 1024); CodeBuffer buffer("deopt_blob", 2048 + pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1; int frame_size_in_words = -1;
OopMap* map = NULL; OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
assert_cond(masm != NULL && oop_maps != NULL); assert_cond(masm != nullptr && oop_maps != nullptr);
RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0); RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
// ------------- // -------------
@ -2420,7 +2420,7 @@ void SharedRuntime::generate_deopt_blob() {
masm->flush(); masm->flush();
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
assert(_deopt_blob != NULL, "create deoptimization blob fail!"); assert(_deopt_blob != nullptr, "create deoptimization blob fail!");
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
if (EnableJVMCI) { if (EnableJVMCI) {
@ -2450,7 +2450,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Setup code generation tools // Setup code generation tools
CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
@ -2497,7 +2497,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Set an oopmap for the call site // Set an oopmap for the call site
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0); OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
assert_cond(oop_maps != NULL && map != NULL); assert_cond(oop_maps != nullptr && map != nullptr);
// location of fp is known implicitly by the frame sender code // location of fp is known implicitly by the frame sender code
@ -2649,16 +2649,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
ResourceMark rm; ResourceMark rm;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
OopMap* map = NULL; OopMap* map = nullptr;
// Allocate space for the code. Setup code generation tools. // Allocate space for the code. Setup code generation tools.
CodeBuffer buffer("handler_blob", 2048, 1024); CodeBuffer buffer("handler_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
address start = __ pc(); address start = __ pc();
address call_pc = NULL; address call_pc = nullptr;
int frame_size_in_words = -1; int frame_size_in_words = -1;
bool cause_return = (poll_type == POLL_AT_RETURN); bool cause_return = (poll_type == POLL_AT_RETURN);
RegisterSaver reg_saver(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */); RegisterSaver reg_saver(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
@ -2773,21 +2773,21 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args. // must do any gc of the args.
// //
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code // allocate space for the code
ResourceMark rm; ResourceMark rm;
CodeBuffer buffer(name, 1000, 512); CodeBuffer buffer(name, 1000, 512);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
int frame_size_in_words = -1; int frame_size_in_words = -1;
RegisterSaver reg_saver(false /* save_vectors */); RegisterSaver reg_saver(false /* save_vectors */);
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
OopMap* map = NULL; OopMap* map = nullptr;
int start = __ offset(); int start = __ offset();
@ -2896,7 +2896,7 @@ void OptoRuntime::generate_exception_blob() {
// Setup code generation tools // Setup code generation tools
CodeBuffer buffer("exception_blob", 2048, 1024); CodeBuffer buffer("exception_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
// TODO check various assumptions made here // TODO check various assumptions made here
// //
@ -2952,7 +2952,7 @@ void OptoRuntime::generate_exception_blob() {
// exception. // exception.
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL); assert_cond(oop_maps != nullptr);
oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
View File
@ -490,7 +490,7 @@ class StubGenerator: public StubCodeGenerator {
__ sw(t0, Address(xthread, Thread::exception_line_offset())); __ sw(t0, Address(xthread, Thread::exception_line_offset()));
// complete return to VM // complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, assert(StubRoutines::_call_stub_return_address != nullptr,
"_call_stub_return_address must have been generated before"); "_call_stub_return_address must have been generated before");
__ j(StubRoutines::_call_stub_return_address); __ j(StubRoutines::_call_stub_return_address);
@ -612,7 +612,7 @@ class StubGenerator: public StubCodeGenerator {
// object is in x10 // object is in x10
// make sure object is 'reasonable' // make sure object is 'reasonable'
__ beqz(x10, exit); // if obj is NULL it is OK __ beqz(x10, exit); // if obj is null it is OK
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, x10, c_rarg2, c_rarg3, error); bs_asm->check_oop(_masm, x10, c_rarg2, c_rarg3, error);
@ -726,7 +726,7 @@ class StubGenerator: public StubCodeGenerator {
assert_different_registers(s, d, count, t0); assert_different_registers(s, d, count, t0);
Label again, drain; Label again, drain;
const char* stub_name = NULL; const char* stub_name = nullptr;
if (direction == copy_forwards) { if (direction == copy_forwards) {
stub_name = "forward_copy_longs"; stub_name = "forward_copy_longs";
} else { } else {
@ -1118,7 +1118,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc(); address start = __ pc();
__ enter(); __ enter();
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1186,7 +1186,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc(); address start = __ pc();
__ enter(); __ enter();
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1469,8 +1469,8 @@ class StubGenerator: public StubCodeGenerator {
Label L_miss; Label L_miss;
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, super_check_offset); __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr, super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr);
// Fall through on failure! // Fall through on failure!
__ BIND(L_miss); __ BIND(L_miss);
@ -1530,7 +1530,7 @@ class StubGenerator: public StubCodeGenerator {
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
// Caller of this entry point must set up the argument registers. // Caller of this entry point must set up the argument registers.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
@ -1675,8 +1675,8 @@ class StubGenerator: public StubCodeGenerator {
address short_copy_entry, address short_copy_entry,
address int_copy_entry, address int_copy_entry,
address long_copy_entry) { address long_copy_entry) {
assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL && assert_cond(byte_copy_entry != nullptr && short_copy_entry != nullptr &&
int_copy_entry != NULL && long_copy_entry != NULL); int_copy_entry != nullptr && long_copy_entry != nullptr);
Label L_long_aligned, L_int_aligned, L_short_aligned; Label L_long_aligned, L_int_aligned, L_short_aligned;
const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; const Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
@ -1730,9 +1730,9 @@ class StubGenerator: public StubCodeGenerator {
address byte_copy_entry, address short_copy_entry, address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address oop_copy_entry, address int_copy_entry, address oop_copy_entry,
address long_copy_entry, address checkcast_copy_entry) { address long_copy_entry, address checkcast_copy_entry) {
assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL && assert_cond(byte_copy_entry != nullptr && short_copy_entry != nullptr &&
int_copy_entry != NULL && oop_copy_entry != NULL && int_copy_entry != nullptr && oop_copy_entry != nullptr &&
long_copy_entry != NULL && checkcast_copy_entry != NULL); long_copy_entry != nullptr && checkcast_copy_entry != nullptr);
Label L_failed, L_failed_0, L_objArray; Label L_failed, L_failed_0, L_objArray;
Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
@ -1765,13 +1765,13 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative. // (2) src_pos must not be negative.
// (3) dst_pos must not be negative. // (3) dst_pos must not be negative.
// (4) length must not be negative. // (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL. // (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays. // (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src. // (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst. // (8) dst_pos + length must not exceed length of dst.
// //
// if [src == NULL] then return -1 // if src is null then return -1
__ beqz(src, L_failed); __ beqz(src, L_failed);
// if [src_pos < 0] then return -1 // if [src_pos < 0] then return -1
@ -1779,7 +1779,7 @@ class StubGenerator: public StubCodeGenerator {
__ test_bit(t0, src_pos, 31); __ test_bit(t0, src_pos, 31);
__ bnez(t0, L_failed); __ bnez(t0, L_failed);
// if [dst == NULL] then return -1 // if dst is null then return -1
__ beqz(dst, L_failed); __ beqz(dst, L_failed);
// if [dst_pos < 0] then return -1 // if [dst_pos < 0] then return -1
@ -1803,7 +1803,7 @@ class StubGenerator: public StubCodeGenerator {
{ {
BLOCK_COMMENT("assert klasses not null {"); BLOCK_COMMENT("assert klasses not null {");
Label L1, L2; Label L1, L2;
__ bnez(scratch_src_klass, L2); // it is broken if klass is NULL __ bnez(scratch_src_klass, L2); // it is broken if klass is null
__ bind(L1); __ bind(L1);
__ stop("broken null klass"); __ stop("broken null klass");
__ bind(L2); __ bind(L2);
@ -1833,7 +1833,7 @@ class StubGenerator: public StubCodeGenerator {
__ load_klass(t1, dst); __ load_klass(t1, dst);
__ bne(t1, scratch_src_klass, L_failed); __ bne(t1, scratch_src_klass, L_failed);
// if [src->is_Array() != NULL] then return -1 // if src->is_Array() isn't null then return -1
// i.e. (lh >= 0) // i.e. (lh >= 0)
__ test_bit(t0, lh, 31); __ test_bit(t0, lh, 31);
__ beqz(t0, L_failed); __ beqz(t0, L_failed);
@ -2176,13 +2176,13 @@ class StubGenerator: public StubCodeGenerator {
} }
void generate_arraycopy_stubs() { void generate_arraycopy_stubs() {
address entry = NULL; address entry = nullptr;
address entry_jbyte_arraycopy = NULL; address entry_jbyte_arraycopy = nullptr;
address entry_jshort_arraycopy = NULL; address entry_jshort_arraycopy = nullptr;
address entry_jint_arraycopy = NULL; address entry_jint_arraycopy = nullptr;
address entry_oop_arraycopy = NULL; address entry_oop_arraycopy = nullptr;
address entry_jlong_arraycopy = NULL; address entry_jlong_arraycopy = nullptr;
address entry_checkcast_arraycopy = NULL; address entry_checkcast_arraycopy = nullptr;
generate_copy_longs(copy_f, c_rarg0, c_rarg1, t1, copy_forwards); generate_copy_longs(copy_f, c_rarg0, c_rarg1, t1, copy_forwards);
generate_copy_longs(copy_b, c_rarg0, c_rarg1, t1, copy_backwards); generate_copy_longs(copy_b, c_rarg0, c_rarg1, t1, copy_backwards);
@ -2198,7 +2198,7 @@ class StubGenerator: public StubCodeGenerator {
"jbyte_arraycopy"); "jbyte_arraycopy");
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
"arrayof_jbyte_disjoint_arraycopy"); "arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr,
"arrayof_jbyte_arraycopy"); "arrayof_jbyte_arraycopy");
//*** jshort //*** jshort
@ -2210,7 +2210,7 @@ class StubGenerator: public StubCodeGenerator {
"jshort_arraycopy"); "jshort_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
"arrayof_jshort_disjoint_arraycopy"); "arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr,
"arrayof_jshort_arraycopy"); "arrayof_jshort_arraycopy");
//*** jint //*** jint
@ -2253,7 +2253,7 @@ class StubGenerator: public StubCodeGenerator {
= generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit", = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_arrayof_oop_arraycopy_uninit StubRoutines::_arrayof_oop_arraycopy_uninit
= generate_conjoint_oop_copy(aligned, entry, NULL, "arrayof_oop_arraycopy_uninit", = generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
} }
@ -2263,7 +2263,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit;
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
@ -3722,7 +3722,7 @@ class StubGenerator: public StubCodeGenerator {
// the compilers are responsible for supplying a continuation point // the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved. // if they expect all registers to be preserved.
// n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0 // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
assert_cond(runtime_entry != NULL); assert_cond(runtime_entry != nullptr);
enum layout { enum layout {
fp_off = 0, fp_off = 0,
fp_off2, fp_off2,
@ -3737,7 +3737,7 @@ class StubGenerator: public StubCodeGenerator {
CodeBuffer code(name, insts_size, locs_size); CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code); MacroAssembler* masm = new MacroAssembler(&code);
assert_cond(oop_maps != NULL && masm != NULL); assert_cond(oop_maps != nullptr && masm != nullptr);
address start = __ pc(); address start = __ pc();
@ -3773,7 +3773,7 @@ class StubGenerator: public StubCodeGenerator {
// Generate oop map // Generate oop map
OopMap* map = new OopMap(framesize, 0); OopMap* map = new OopMap(framesize, 0);
assert_cond(map != NULL); assert_cond(map != nullptr);
oop_maps->add_gc_map(the_pc - start, map); oop_maps->add_gc_map(the_pc - start, map);
@ -3798,7 +3798,7 @@ class StubGenerator: public StubCodeGenerator {
frame_complete, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)), (framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false); oop_maps, false);
assert(stub != NULL, "create runtime stub fail!"); assert(stub != nullptr, "create runtime stub fail!");
return stub->entry_point(); return stub->entry_point();
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -34,25 +34,25 @@
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::riscv::_get_previous_sp_entry = NULL; address StubRoutines::riscv::_get_previous_sp_entry = nullptr;
address StubRoutines::riscv::_f2i_fixup = NULL; address StubRoutines::riscv::_f2i_fixup = nullptr;
address StubRoutines::riscv::_f2l_fixup = NULL; address StubRoutines::riscv::_f2l_fixup = nullptr;
address StubRoutines::riscv::_d2i_fixup = NULL; address StubRoutines::riscv::_d2i_fixup = nullptr;
address StubRoutines::riscv::_d2l_fixup = NULL; address StubRoutines::riscv::_d2l_fixup = nullptr;
address StubRoutines::riscv::_float_sign_mask = NULL; address StubRoutines::riscv::_float_sign_mask = nullptr;
address StubRoutines::riscv::_float_sign_flip = NULL; address StubRoutines::riscv::_float_sign_flip = nullptr;
address StubRoutines::riscv::_double_sign_mask = NULL; address StubRoutines::riscv::_double_sign_mask = nullptr;
address StubRoutines::riscv::_double_sign_flip = NULL; address StubRoutines::riscv::_double_sign_flip = nullptr;
address StubRoutines::riscv::_zero_blocks = NULL; address StubRoutines::riscv::_zero_blocks = nullptr;
address StubRoutines::riscv::_compare_long_string_LL = NULL; address StubRoutines::riscv::_compare_long_string_LL = nullptr;
address StubRoutines::riscv::_compare_long_string_UU = NULL; address StubRoutines::riscv::_compare_long_string_UU = nullptr;
address StubRoutines::riscv::_compare_long_string_LU = NULL; address StubRoutines::riscv::_compare_long_string_LU = nullptr;
address StubRoutines::riscv::_compare_long_string_UL = NULL; address StubRoutines::riscv::_compare_long_string_UL = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ll = NULL; address StubRoutines::riscv::_string_indexof_linear_ll = nullptr;
address StubRoutines::riscv::_string_indexof_linear_uu = NULL; address StubRoutines::riscv::_string_indexof_linear_uu = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ul = NULL; address StubRoutines::riscv::_string_indexof_linear_ul = nullptr;
address StubRoutines::riscv::_large_byte_array_inflate = NULL; address StubRoutines::riscv::_large_byte_array_inflate = nullptr;
address StubRoutines::riscv::_method_entry_barrier = NULL; address StubRoutines::riscv::_method_entry_barrier = nullptr;
bool StubRoutines::riscv::_completed = false; bool StubRoutines::riscv::_completed = false;
View File
@ -101,7 +101,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args <- esp // stack args <- esp
// garbage // garbage
// expression stack bottom // expression stack bottom
// bcp (NULL) // bcp (null)
// ... // ...
// Restore ra // Restore ra
@ -160,8 +160,8 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// [ arg ] // [ arg ]
// retaddr in ra // retaddr in ra
address fn = NULL; address fn = nullptr;
address entry_point = NULL; address entry_point = nullptr;
Register continuation = ra; Register continuation = ra;
switch (kind) { switch (kind) {
case Interpreter::java_lang_math_abs: case Interpreter::java_lang_math_abs:
@ -182,7 +182,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dsin() == NULL) { if (StubRoutines::dsin() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
@@ -195,7 +195,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dcos() == NULL) { if (StubRoutines::dcos() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
@@ -208,7 +208,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dtan() == NULL) { if (StubRoutines::dtan() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
@@ -221,7 +221,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dlog() == NULL) { if (StubRoutines::dlog() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
@@ -234,7 +234,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dlog10() == NULL) { if (StubRoutines::dlog10() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
@@ -247,7 +247,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
__ mv(x9, ra); __ mv(x9, ra);
continuation = x9; // The first callee-saved register continuation = x9; // The first callee-saved register
if (StubRoutines::dexp() == NULL) { if (StubRoutines::dexp() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
@@ -261,7 +261,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ fld(f10, Address(esp, 2 * Interpreter::stackElementSize)); __ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
__ fld(f11, Address(esp)); __ fld(f11, Address(esp));
__ mv(sp, x19_sender_sp); __ mv(sp, x19_sender_sp);
if (StubRoutines::dpow() == NULL) { if (StubRoutines::dpow() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
} else { } else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow()); fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
@@ -291,7 +291,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
default: default:
; ;
} }
if (entry_point != NULL) { if (entry_point != nullptr) {
__ jr(continuation); __ jr(continuation);
} }
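Every java_lang_math_* case above follows the same selection idiom: use the generated stub when one exists, otherwise fall back to the shared-runtime C implementation. A hedged sketch of that idiom (simplified stand-in types, not HotSpot declarations):

typedef unsigned char* address;

static address pick_math_entry(address generated_stub, address shared_fallback) {
  // A stub that was never generated is nullptr, so the selection is a
  // plain pointer test rather than a comparison against the integer 0.
  return (generated_stub != nullptr) ? generated_stub : shared_fallback;
}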
@@ -308,7 +308,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry // abstract method entry
// pop return address, reset last_sp to NULL // pop return address, reset last_sp to null
__ empty_expression_stack(); __ empty_expression_stack();
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed) __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed) __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
@@ -388,7 +388,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address TemplateInterpreterGenerator::generate_exception_handler_common( address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) { const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both"); assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc(); address entry = __ pc();
if (pass_oop) { if (pass_oop) {
// object is at TOS // object is at TOS
@@ -405,9 +405,9 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
create_klass_exception), create_klass_exception),
c_rarg1, c_rarg2); c_rarg1, c_rarg2);
} else { } else {
// kind of lame ExternalAddress can't take NULL because // kind of lame ExternalAddress can't take null because
// external_word_Relocation will assert. // external_word_Relocation will assert.
if (message != NULL) { if (message != nullptr) {
__ la(c_rarg2, Address((address)message)); __ la(c_rarg2, Address((address)message));
} else { } else {
__ mv(c_rarg2, NULL_WORD); __ mv(c_rarg2, NULL_WORD);
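The else branch is the one place a bare zero-valued constant survives: ExternalAddress cannot relocate a null, so the generator moves the NULL_WORD immediate into the register instead. NULL_WORD is consumed by the assembler as an integer immediate rather than a C++ pointer, which is presumably why the patch can leave it untouched here. A simplified sketch of the distinction (hypothetical names and types, not HotSpot code):

#include <cstdint>

typedef intptr_t word_t;
const word_t NULL_WORD_SKETCH = 0;  // stand-in for HotSpot's NULL_WORD

// Pointer tests use nullptr; an immediate placed in a register stays an
// integer word. Conflating the two is what the old NULL macro invited.
static word_t materialize(const char* message) {
  return (message != nullptr) ? reinterpret_cast<word_t>(message)
                              : NULL_WORD_SKETCH;
}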
@@ -426,7 +426,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Restore stack bottom in case i2c adjusted stack // Restore stack bottom in case i2c adjusted stack
__ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); __ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call // and null it as marker that esp is now tos until next java call
__ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
@@ -483,7 +483,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
// Restore expression stack pointer // Restore expression stack pointer
__ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); __ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// NULL last_sp until next java call // null last_sp until next java call
__ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// handle exceptions // handle exceptions
@@ -497,7 +497,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ bind(L); __ bind(L);
} }
if (continuation == NULL) { if (continuation == nullptr) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else { } else {
__ jump_to_entry(continuation); __ jump_to_entry(continuation);
@@ -522,7 +522,7 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
address runtime_entry) { address runtime_entry) {
assert_cond(runtime_entry != NULL); assert_cond(runtime_entry != nullptr);
address entry = __ pc(); address entry = __ pc();
__ push(state); __ push(state);
__ push_cont_fastpath(xthread); __ push_cont_fastpath(xthread);
@@ -652,7 +652,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted. // Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError. // Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
// all done with frame size check // all done with frame size check
@@ -694,7 +694,7 @@ void TemplateInterpreterGenerator::lock_method() {
{ {
Label L; Label L;
__ bnez(x10, L); __ bnez(x10, L);
__ stop("synchronization object is NULL"); __ stop("synchronization object is null");
__ bind(L); __ bind(L);
} }
#endif // ASSERT #endif // ASSERT
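The debug-only block above stops the VM rather than proceeding with a null synchronization object. In the spirit of that guard, a sketch using a plain assert (not the HotSpot code, which emits a stop with a message):

#include <cassert>

static void lock_method_sketch(void* sync_object) {
#ifdef ASSERT                // HotSpot's debug-build flag, mirrored here
  assert(sync_object != nullptr && "synchronization object is null");
#else
  (void)sync_object;         // silence unused-parameter warnings
#endif
  // ... proceed to build the monitor entry ...
}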
@@ -840,7 +840,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
Label slow_path; Label slow_path;
const Register local_0 = c_rarg0; const Register local_0 = c_rarg0;
// Check if local 0 != NULL // Check if local 0 isn't null
// If the receiver is null then it is OK to jump to the slow path. // If the receiver is null then it is OK to jump to the slow path.
__ ld(local_0, Address(esp, 0)); __ ld(local_0, Address(esp, 0));
__ beqz(local_0, slow_path); __ beqz(local_0, slow_path);
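A null receiver is legal at this point; the beqz simply routes it to the slow path and lets the runtime deal with it. The guard, rendered as C++ (hypothetical names, sketch only):

struct oop_sketch;

static bool must_take_slow_path(const oop_sketch* receiver /* local 0 */) {
  return receiver == nullptr;  // beqz local_0, slow_path
}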
@@ -1630,7 +1630,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ bne(t1, t0, L_done); __ bne(t1, t0, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
__ ld(c_rarg0, Address(xlocals, 0)); __ ld(c_rarg0, Address(xlocals, 0));
__ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),c_rarg0, xmethod, xbcp); __ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),c_rarg0, xmethod, xbcp);
@@ -1726,7 +1726,7 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
address& fep, address& fep,
address& dep, address& dep,
address& vep) { address& vep) {
assert(t != NULL && t->is_valid() && t->tos_in() == vtos, "illegal template"); assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
Label L; Label L;
aep = __ pc(); __ push_ptr(); __ j(L); aep = __ pc(); __ push_ptr(); __ j(L);
fep = __ pc(); __ push_f(); __ j(L); fep = __ pc(); __ push_f(); __ j(L);
@@ -1795,7 +1795,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The run-time runtime saves the right registers, depending on // The run-time runtime saves the right registers, depending on
// the tosca in-state for the given template. // the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL, "entry must have been generated"); assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
__ jal(Interpreter::trace_code(t->tos_in())); __ jal(Interpreter::trace_code(t->tos_in()));
__ reinit_heapbase(); __ reinit_heapbase();
} }
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -123,8 +123,8 @@ static inline Address at_tos_p5() {
} }
// Miscellaneous helper routines // Miscellaneous helper routines
// Store an oop (or NULL) at the Address described by obj. // Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a NULL // If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm, static void do_oop_store(InterpreterMacroAssembler* _masm,
Address dst, Address dst,
Register val, Register val,
@@ -391,7 +391,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ ld(tmp, Address(rarg, offset)); __ ld(tmp, Address(rarg, offset));
__ resolve_oop_handle(tmp, x15, t1); __ resolve_oop_handle(tmp, x15, t1);
__ bne(result, tmp, notNull); __ bne(result, tmp, notNull);
__ mv(result, zr); // NULL object reference __ mv(result, zr); // null object reference
__ bind(notNull); __ bind(notNull);
} }
@@ -1050,7 +1050,7 @@ void TemplateTable::aastore() {
Address element_address(x14, 0); Address element_address(x14, 0);
// do array store check - check for NULL value first // do array store check - check for null value first
__ beqz(x10, is_null); __ beqz(x10, is_null);
// Move subklass into x11 // Move subklass into x11
@@ -1078,11 +1078,11 @@ void TemplateTable::aastore() {
do_oop_store(_masm, element_address, x10, IS_ARRAY); do_oop_store(_masm, element_address, x10, IS_ARRAY);
__ j(done); __ j(done);
// Have a NULL in x10, x13=array, x12=index. Store NULL at ary[idx] // Have a null in x10, x13=array, x12=index. Store null at ary[idx]
__ bind(is_null); __ bind(is_null);
__ profile_null_seen(x12); __ profile_null_seen(x12);
// Store a NULL // Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY); do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments // Pop stack arguments
@@ -1717,7 +1717,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
x12); x12);
__ load_unsigned_byte(x11, Address(xbcp, 0)); // restore target bytecode __ load_unsigned_byte(x11, Address(xbcp, 0)); // restore target bytecode
// x10: osr nmethod (osr ok) or NULL (osr not possible) // x10: osr nmethod (osr ok) or null (osr not possible)
// w11: target bytecode // w11: target bytecode
// x12: temporary // x12: temporary
__ beqz(x10, dispatch); // test result -- no osr if null __ beqz(x10, dispatch); // test result -- no osr if null
@@ -2181,7 +2181,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) { if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache); __ load_resolved_method_at_index(byte_no, temp, Rcache);
__ load_method_holder(temp, temp); __ load_method_holder(temp, temp);
__ clinit_barrier(temp, t0, NULL, &clinit_barrier_slow); __ clinit_barrier(temp, t0, nullptr, &clinit_barrier_slow);
} }
} }
@@ -2344,12 +2344,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
__ la(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset()))); __ la(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
if (is_static) { if (is_static) {
__ mv(c_rarg1, zr); // NULL object reference __ mv(c_rarg1, zr); // null object reference
} else { } else {
__ ld(c_rarg1, at_tos()); // get object pointer without popping it __ ld(c_rarg1, at_tos()); // get object pointer without popping it
__ verify_oop(c_rarg1); __ verify_oop(c_rarg1);
} }
// c_rarg1: object pointer or NULL // c_rarg1: object pointer or null
// c_rarg2: cache entry pointer // c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack // c_rarg3: jvalue object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address, __ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -2587,7 +2587,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset)); __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
// object (tos) // object (tos)
__ mv(c_rarg3, esp); __ mv(c_rarg3, esp);
// c_rarg1: object pointer set up above (NULL if static) // c_rarg1: object pointer set up above (null if static)
// c_rarg2: cache entry pointer // c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack // c_rarg3: jvalue object on the stack
__ call_VM(noreg, __ call_VM(noreg,
@@ -3604,7 +3604,7 @@ void TemplateTable::checkcast() {
__ bind(ok_is_subtype); __ bind(ok_is_subtype);
__ mv(x10, x13); // Restore object in x13 __ mv(x10, x13); // Restore object in x13
// Collect counts on whether this test sees NULLs a lot or not. // Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) { if (ProfileInterpreter) {
__ j(done); __ j(done);
__ bind(is_null); __ bind(is_null);
@@ -3659,7 +3659,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype); __ bind(ok_is_subtype);
__ mv(x10, 1); __ mv(x10, 1);
// Collect counts on whether this test sees NULLs a lot or not. // Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) { if (ProfileInterpreter) {
__ j(done); __ j(done);
__ bind(is_null); __ bind(is_null);
@@ -3668,8 +3668,8 @@ void TemplateTable::instanceof() {
__ bind(is_null); // same as 'done' __ bind(is_null); // same as 'done'
} }
__ bind(done); __ bind(done);
// x10 = 0: obj == NULL or obj is not an instanceof the specified klass // x10 = 0: obj is null or obj is not an instanceof the specified klass
// x10 = 1: obj != NULL and obj is an instanceof the specified klass // x10 = 1: obj isn't null and obj is an instanceof the specified klass
} }
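The two result comments above are the whole contract of the bytecode's fast path. The same encoding, rendered as C++ (a sketch, not HotSpot code):

static int instanceof_result(const void* obj, bool is_subtype_of_klass) {
  // 0: obj is null, or obj is not an instance of the specified klass
  // 1: obj is non-null and an instance of the specified klass
  return (obj != nullptr && is_subtype_of_klass) ? 1 : 0;
}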
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@@ -3730,7 +3730,7 @@ void TemplateTable::athrow() {
void TemplateTable::monitorenter() { void TemplateTable::monitorenter() {
transition(atos, vtos); transition(atos, vtos);
// check for NULL object // check for null object
__ null_check(x10); __ null_check(x10);
const Address monitor_block_top( const Address monitor_block_top(
@@ -3742,7 +3742,7 @@ void TemplateTable::monitorenter() {
Label allocated; Label allocated;
// initialize entry pointer // initialize entry pointer
__ mv(c_rarg1, zr); // points to free slot or NULL __ mv(c_rarg1, zr); // points to free slot or null
// find a free slot in the monitor block (result in c_rarg1) // find a free slot in the monitor block (result in c_rarg1)
{ {
@@ -3828,7 +3828,7 @@ void TemplateTable::monitorexit() {
void TemplateTable::monitorexit() { void TemplateTable::monitorexit() {
transition(atos, vtos); transition(atos, vtos);
// check for NULL object // check for null object
__ null_check(x10); __ null_check(x10);
const Address monitor_block_top( const Address monitor_block_top(
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@@ -210,7 +210,7 @@ void VM_Version::initialize() {
char buf[512]; char buf[512];
buf[0] = '\0'; buf[0] = '\0';
if (_uarch != NULL && strcmp(_uarch, "") != 0) snprintf(buf, sizeof(buf), "%s,", _uarch); if (_uarch != nullptr && strcmp(_uarch, "") != 0) snprintf(buf, sizeof(buf), "%s,", _uarch);
strcat(buf, "rv64"); strcat(buf, "rv64");
#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) if (_features & CPU_##id) strcat(buf, name); #define ADD_FEATURE_IF_SUPPORTED(id, name, bit) if (_features & CPU_##id) strcat(buf, name);
CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED) CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
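Order matters in the condition above: calling strcmp on a null pointer is undefined behavior, so the nullptr test must come first and && must short-circuit. A self-contained sketch of the same guard (hypothetical helper name):

#include <cstdio>
#include <cstring>

static void append_uarch(char* buf, std::size_t buflen, const char* uarch) {
  if (uarch != nullptr && std::strcmp(uarch, "") != 0) {
    std::snprintf(buf, buflen, "%s,", uarch);  // safe: uarch checked first
  }
}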
@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -52,22 +52,22 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true); const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid // We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs. // overflow in subsequently generated stubs.
address start_pc = NULL; address start_pc = nullptr;
int slop_bytes = 0; int slop_bytes = 0;
int slop_delta = 0; int slop_delta = 0;
ResourceMark rm; ResourceMark rm;
CodeBuffer cb(s->entry_point(), stub_code_length); CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb); MacroAssembler* masm = new MacroAssembler(&cb);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
#if (!defined(PRODUCT) && defined(COMPILER2)) #if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) { if (CountCompiledCalls) {
@@ -122,7 +122,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ beqz(xmethod, L); __ beqz(xmethod, L);
__ ld(t0, Address(xmethod, Method::from_compiled_offset())); __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
__ bnez(t0, L); __ bnez(t0, L);
__ stop("Vtable entry is NULL"); __ stop("Vtable entry is null");
__ bind(L); __ bind(L);
} }
#endif // PRODUCT #endif // PRODUCT
@@ -144,21 +144,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false); const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid // We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs. // overflow in subsequently generated stubs.
address start_pc = NULL; address start_pc = nullptr;
int slop_bytes = 0; int slop_bytes = 0;
int slop_delta = 0; int slop_delta = 0;
ResourceMark rm; ResourceMark rm;
CodeBuffer cb(s->entry_point(), stub_code_length); CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb); MacroAssembler* masm = new MacroAssembler(&cb);
assert_cond(masm != NULL); assert_cond(masm != nullptr);
#if (!defined(PRODUCT) && defined(COMPILER2)) #if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) { if (CountCompiledCalls) {
@@ -244,7 +244,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// We force resolving of the call site by jumping to the "handle // We force resolving of the call site by jumping to the "handle
// wrong method" stub, and so let the interpreter runtime do all the // wrong method" stub, and so let the interpreter runtime do all the
// dirty work. // dirty work.
assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order"); assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
masm->flush(); masm->flush();
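Both stub creators treat a full code cache as an ordinary outcome: allocation reports nullptr and the caller hands it up. A hedged analogue using nothrow allocation (names hypothetical, not the HotSpot allocator):

#include <new>
#include <cstdio>

struct StubSketch { int filler; };

static StubSketch* try_create_stub(bool cache_full) {
  if (cache_full) {
    return nullptr;                        // no free space in the cache
  }
  return new (std::nothrow) StubSketch();  // may itself yield nullptr
}

int main() {
  StubSketch* s = try_create_stub(true);
  if (s == nullptr) {                      // caller must check, just as
    std::puts("stub not created");         // the vtable stub callers do
    return 0;
  }
  delete s;
  return 0;
}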