8301496: Replace NULL with nullptr in cpu/riscv
Reviewed-by: dholmes, fyang
parent 54bf370079
commit d2ce04bb10
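
Background, not part of the patch itself: NULL is an integer-valued macro, while nullptr (C++11) is a keyword with its own type, std::nullptr_t, which converts only to pointer types, so it behaves correctly under overload resolution. A minimal, hypothetical sketch of the difference (the function name `overloaded` is illustrative, not from HotSpot):

    #include <cstddef>

    void overloaded(int)   {}  // integer overload
    void overloaded(char*) {}  // pointer overload

    int main() {
      // overloaded(NULL);  // fragile: NULL expands to an integral constant
      //                    // (0, 0L, or __null), so this may pick the int
      //                    // overload or fail to compile as ambiguous
      overloaded(nullptr);  // unambiguous: std::nullptr_t converts only to
                            // pointer (and pointer-to-member) types
      return 0;
    }

The diff below is reproduced with standard unified-diff markers: `-` for removed lines, `+` for added lines.
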
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -66,7 +66,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
- assert_cond(method != NULL);
+ assert_cond(method != nullptr);
  const int method_stack = (method->max_locals() + method->max_stack()) *
  Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -381,7 +381,7 @@ public:
  }

  static void patch(address a, unsigned msb, unsigned lsb, unsigned val) {
- assert_cond(a != NULL);
+ assert_cond(a != nullptr);
  assert_cond(msb >= lsb && msb <= 31);
  unsigned nbits = msb - lsb + 1;
  guarantee(val < (1U << nbits), "Field too big for insn");
@@ -1926,7 +1926,7 @@ public:

  // patch a 16-bit instruction.
  static void c_patch(address a, unsigned msb, unsigned lsb, uint16_t val) {
- assert_cond(a != NULL);
+ assert_cond(a != nullptr);
  assert_cond(msb >= lsb && msb <= 15);
  unsigned nbits = msb - lsb + 1;
  guarantee(val < (1U << nbits), "Field too big for insn");
@@ -2171,7 +2171,7 @@ public:
  emit_int16(insn); \
  } \
  void NAME(address dest) { \
- assert_cond(dest != NULL); \
+ assert_cond(dest != nullptr); \
  int64_t distance = dest - pc(); \
  assert(is_simm12(distance) && ((distance % 2) == 0), "invalid encoding"); \
  c_j(distance); \
@@ -2199,7 +2199,7 @@ public:
  emit_int16(insn); \
  } \
  void NAME(Register Rs1, address dest) { \
- assert_cond(dest != NULL); \
+ assert_cond(dest != nullptr); \
  int64_t distance = dest - pc(); \
  assert(is_simm9(distance) && ((distance % 2) == 0), "invalid encoding"); \
  NAME(Rs1, distance); \

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -49,7 +49,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  });
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

- assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+ assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
  "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

@@ -253,7 +253,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  }

  void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
- address a = NULL;
+ address a = nullptr;
  if (_info->deoptimize_on_exception()) {
  // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
  a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
@@ -322,7 +322,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
- if (call == NULL) {
+ if (call == nullptr) {
  ce->bailout("trampoline stub overflow");
  return;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -263,7 +263,7 @@ void LIR_Assembler::arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr rig

  void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
  CodeEmitInfo* info, bool pop_fpu_stack) {
- assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
+ assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
  arith_op_single_cpu(code, left, right, dest);

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -42,7 +42,7 @@ void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register l
  arraycopy_store_args(src, src_pos, length, dst, dst_pos);

  address copyfunc_addr = StubRoutines::generic_arraycopy();
- assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+ assert(copyfunc_addr != nullptr, "generic arraycopy stub required");

  // The arguments are in java calling convention so we shift them
  // to C convention
@@ -80,7 +80,7 @@ void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register l
  void LIR_Assembler::arraycopy_simple_check(Register src, Register src_pos, Register length,
  Register dst, Register dst_pos, Register tmp,
  CodeStub *stub, int flags) {
- // test for NULL
+ // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
  __ beqz(src, *stub->entry(), /* is_far */ true);
  }
@@ -220,7 +220,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
  PUSH(src, dst);
  __ load_klass(src, src);
  __ load_klass(dst, dst);
- __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
+ __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

  PUSH(src, dst);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
@@ -231,7 +231,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
  POP(src, dst);

  address copyfunc_addr = StubRoutines::checkcast_arraycopy();
- if (copyfunc_addr != NULL) { // use stub if available
+ if (copyfunc_addr != nullptr) { // use stub if available
  arraycopy_checkcast(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, copyfunc_addr, flags);
  }

@@ -242,7 +242,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
  }

  void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) {
- assert(default_type != NULL, "NULL default_type!");
+ assert(default_type != nullptr, "null default_type!");
  BasicType basic_type = default_type->element_type()->basic_type();

  if (basic_type == T_ARRAY) { basic_type = T_OBJECT; }
@@ -299,16 +299,16 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

  CodeStub* stub = op->stub();
  int flags = op->flags();
- BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
+ BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) { basic_type = T_OBJECT; }

  // if we don't know anything, just go through the generic arraycopy
- if (default_type == NULL) {
+ if (default_type == nullptr) {
  generic_arraycopy(src, src_pos, length, dst, dst_pos, stub);
  return;
  }

- assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
+ assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(),
  "must be true at this point");

  arraycopy_simple_check(src, src_pos, length, dst, dst_pos, tmp, stub, flags);
@@ -330,11 +330,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
- const char *name = NULL;
+ const char *name = nullptr;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
- if (cb != NULL) {
+ if (cb != nullptr) {
  __ far_call(RuntimeAddress(entry));
  } else {
  const int args_num = 3;

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -201,7 +201,7 @@ Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) {
  void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
- guarantee(osr_entry != NULL, "NULL osr_entry!");
+ guarantee(osr_entry != nullptr, "null osr_entry!");
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

@@ -251,7 +251,7 @@ void LIR_Assembler::osr_entry() {
  Label L;
  __ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
  __ bnez(t0, L);
- __ stop("locked object is NULL");
+ __ stop("locked object is null");
  __ bind(L);
  }
  #endif // ASSERT
@@ -288,7 +288,7 @@ int LIR_Assembler::check_icache() {
  }

  void LIR_Assembler::jobject2reg(jobject o, Register reg) {
- if (o == NULL) {
+ if (o == nullptr) {
  __ mv(reg, zr);
  } else {
  __ movoop(reg, o);
@@ -309,7 +309,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
  int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
- if (handler_base == NULL) {
+ if (handler_base == nullptr) {
  // not enough space left for the handler
  bailout("exception handler overflow");
  return -1;
@@ -356,7 +356,7 @@ int LIR_Assembler::emit_unwind_handler() {
  }

  // Perform needed unlocking
- MonitorExitStub* stub = NULL;
+ MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
  monitor_address(0, FrameMap::r10_opr);
  stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
@@ -384,7 +384,7 @@ int LIR_Assembler::emit_unwind_handler() {
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
- if (stub != NULL) {
+ if (stub != nullptr) {
  stub->emit_code(this);
  }

@@ -394,7 +394,7 @@ int LIR_Assembler::emit_unwind_handler() {
  int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
- if (handler_base == NULL) {
+ if (handler_base == nullptr) {
  // not enough space left for the handler
  bailout("deopt handler overflow");
  return -1;
@@ -427,7 +427,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  }

  int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
- guarantee(info != NULL, "Shouldn't be NULL");
+ guarantee(info != nullptr, "Shouldn't be null");
  __ get_polling_page(t0, relocInfo::poll_type);
  add_debug_info_for_branch(info); // This isn't just debug info:
  // it's the oop map
@@ -445,7 +445,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
- address const_addr = NULL;
+ address const_addr = nullptr;

  switch (c->type()) {
  case T_INT:
@@ -482,13 +482,13 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod

  case T_FLOAT:
  const_addr = float_constant(c->as_jfloat());
- assert(const_addr != NULL, "must create float constant in the constant table");
+ assert(const_addr != nullptr, "must create float constant in the constant table");
  __ flw(dest->as_float_reg(), InternalAddress(const_addr));
  break;

  case T_DOUBLE:
  const_addr = double_constant(c->as_jdouble());
- assert(const_addr != NULL, "must create double constant in the constant table");
+ assert(const_addr != nullptr, "must create double constant in the constant table");
  __ fld(dest->as_double_reg(), InternalAddress(const_addr));
  break;

@@ -503,15 +503,15 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
- if (c->as_jobject() == NULL) {
+ if (c->as_jobject() == nullptr) {
  __ sd(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
  } else {
- const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+ const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
  reg2stack(FrameMap::t1_opr, dest, c->type(), false);
  }
  break;
  case T_ADDRESS: // fall through
- const2reg(src, FrameMap::t1_opr, lir_patch_none, NULL);
+ const2reg(src, FrameMap::t1_opr, lir_patch_none, nullptr);
  reg2stack(FrameMap::t1_opr, dest, c->type(), false);
  case T_INT: // fall through
  case T_FLOAT:
@@ -582,7 +582,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
  ShouldNotReachHere();
  insn = &MacroAssembler::sd; // unreachable
  }
- if (info != NULL) {
+ if (info != nullptr) {
  add_debug_info_for_null_check_here(info);
  }
  (_masm->*insn)(zr, as_Address(to_addr), t0);
@@ -730,7 +730,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
  ShouldNotReachHere();
  }

- if (info != NULL) {
+ if (info != nullptr) {
  add_debug_info_for_null_check(null_check_here, info);
  }
  }
@@ -800,7 +800,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
  return;
  }

- if (info != NULL) {
+ if (info != nullptr) {
  add_debug_info_for_null_check_here(info);
  }

@@ -901,12 +901,12 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
  /* is_unordered */ (condition == lir_cond_greaterEqual || condition == lir_cond_greater) ? false : true);

  Label done;
- move_op(opr2, result, type, lir_patch_none, NULL,
+ move_op(opr2, result, type, lir_patch_none, nullptr,
  false, // pop_fpu_stack
  false); // wide
  __ j(done);
  __ bind(label);
- move_op(opr1, result, type, lir_patch_none, NULL,
+ move_op(opr1, result, type, lir_patch_none, nullptr,
  false, // pop_fpu_stack
  false); // wide
  __ bind(done);
@@ -915,7 +915,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
  void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
  LIR_Condition condition = op->cond();
  if (condition == lir_cond_always) {
- if (op->info() != NULL) {
+ if (op->info() != nullptr) {
  add_debug_info_for_branch(op->info());
  }
  } else {
@@ -1078,12 +1078,12 @@ void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfil

  void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
  ciMethod* method = op->profiled_method();
- assert(method != NULL, "Should have method");
+ assert(method != nullptr, "Should have method");
  int bci = op->profiled_bci();
  *md = method->method_data_or_null();
- guarantee(*md != NULL, "Sanity");
+ guarantee(*md != nullptr, "Sanity");
  *data = ((*md)->bci_to_data(bci));
- assert(*data != NULL, "need data for type check");
+ assert(*data != nullptr, "need data for type check");
  assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }

@@ -1118,7 +1118,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe
  }
  } else {
  // perform the fast part of the checking logic
- __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+ __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
  // call out-of-line instance of __ check_klass_subtytpe_slow_path(...)
  __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
@@ -1165,8 +1165,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
- ciMethodData* md = NULL;
- ciProfileData* data = NULL;
+ ciMethodData* md = nullptr;
+ ciProfileData* data = nullptr;

  const bool should_profile = op->should_profile();
  if (should_profile) {
@@ -1354,7 +1354,7 @@ void LIR_Assembler::align_call(LIR_Code code) {

  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
- if (call == NULL) {
+ if (call == nullptr) {
  bailout("trampoline stub overflow");
  return;
  }
@@ -1364,7 +1364,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {

  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
- if (call == NULL) {
+ if (call == nullptr) {
  bailout("trampoline stub overflow");
  return;
  }
@@ -1376,7 +1376,7 @@ void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  MacroAssembler::assert_alignment(call_pc);
  address stub = __ start_a_stub(call_stub_size());
- if (stub == NULL) {
+ if (stub == nullptr) {
  bailout("static call stub overflow");
  return;
  }
@@ -1500,7 +1500,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (UseHeavyMonitors) {
- if (op->info() != NULL) {
+ if (op->info() != nullptr) {
  add_debug_info_for_null_check_here(op->info());
  __ null_check(obj);
  }
@@ -1509,7 +1509,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
  // add debug info for NullPointerException only if one is possible
  int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
- if (op->info() != NULL) {
+ if (op->info() != nullptr) {
  add_debug_info_for_null_check(null_check_offset, op->info());
  }
  } else if (op->code() == lir_unlock) {
@@ -1526,7 +1526,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
- if (info != NULL) {
+ if (info != nullptr) {
  add_debug_info_for_null_check_here(info);
  }

@@ -1544,9 +1544,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
- guarantee(md != NULL, "Sanity");
+ guarantee(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
- assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
+ assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
@@ -1559,7 +1559,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  assert_different_registers(mdo, recv);
  assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
  ciKlass* known_klass = op->known_holder();
- if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
+ if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
  // We know the type that will be seen at this call site; we can
  // statically update the MethodData* rather than needing to do
  // dynamic tests on the receiver type
@@ -1582,7 +1582,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  // VirtualCallData rather than just the first time
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
  ciKlass* receiver = vc_data->receiver(i);
- if (receiver == NULL) {
+ if (receiver == nullptr) {
  Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
  __ mov_metadata(t1, known_klass->constant_encoding());
  __ sd(t1, recv_addr);
@@ -1618,8 +1618,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { Unimplemented(); }
  void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
  Register tmp, Label &next, Label &none,
  Address mdo_addr) {
- if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
- if (exact_klass != NULL) {
+ if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
+ if (exact_klass != nullptr) {
  __ mov_metadata(tmp, exact_klass->constant_encoding());
  } else {
  __ load_klass(tmp, tmp);
@@ -1650,7 +1650,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
  __ beqz(t0, next);
  }
  } else {
- assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+ assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
  ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

  __ ld(tmp, mdo_addr);
@@ -1676,7 +1676,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
  void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
  Address mdo_addr, Label &next) {
  // There's a single possible klass at this profile point
- assert(exact_klass != NULL, "should be");
+ assert(exact_klass != nullptr, "should be");
  if (TypeEntries::is_type_none(current_klass)) {
  __ mov_metadata(tmp, exact_klass->constant_encoding());
  __ ld(t1, mdo_addr);
@@ -1705,7 +1705,7 @@ void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_kla
  // first time here. Set profile type.
  __ sd(tmp, mdo_addr);
  } else {
- assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+ assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
  ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

  __ ld(tmp, mdo_addr);
@@ -1744,7 +1744,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Label update, next, none;

  bool do_null = !not_null;
- bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+ bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
@@ -1769,7 +1769,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {

  if (do_update) {
  #ifdef ASSERT
- if (exact_klass != NULL) {
+ if (exact_klass != nullptr) {
  check_exact_klass(tmp, exact_klass);
  }
  #endif
@@ -1840,7 +1840,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
- if (cb != NULL) {
+ if (cb != nullptr) {
  __ far_call(RuntimeAddress(dest));
  } else {
  RuntimeAddress target(dest);
@@ -1851,7 +1851,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
  });
  }

- if (info != NULL) {
+ if (info != nullptr) {
  add_call_info_here(info);
  }
  __ post_call_nop();
@@ -1992,7 +1992,7 @@ int LIR_Assembler::array_element_size(BasicType type) const {
  // bailout case the pointer won't be to unique storage.
  address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
- if (const_addr == NULL) {
+ if (const_addr == nullptr) {
  bailout("const section overflow");
  return __ code()->consts()->start();
  } else {
@@ -2002,7 +2002,7 @@ address LIR_Assembler::float_constant(float f) {

  address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
- if (const_addr == NULL) {
+ if (const_addr == nullptr) {
  bailout("const section overflow");
  return __ code()->consts()->start();
  } else {
@@ -2012,7 +2012,7 @@ address LIR_Assembler::double_constant(double d) {

  address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
- if (const_addr == NULL) {
+ if (const_addr == nullptr) {
  bailout("const section overflow");
  return __ code()->consts()->start();
  } else {
@@ -2042,7 +2042,7 @@ void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  }

  void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
- address target = NULL;
+ address target = nullptr;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
@@ -2109,8 +2109,8 @@ void LIR_Assembler::typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile
  CodeStub* stub = op->stub();

  // check if it needs to be profiled
- ciMethodData* md = NULL;
- ciProfileData* data = NULL;
+ ciMethodData* md = nullptr;
+ ciProfileData* data = nullptr;

  if (should_profile) {
  data_check(op, &md, &data);
@@ -2179,7 +2179,7 @@ void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo,
  // get instance klass (it's already uncompressed)
  __ ld(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
  // perform the fast part of the checking logic
- __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+ __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
  // call out-of-line instance of __ check_klass_subtype_slow_path(...)
  __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
  __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
@@ -2199,10 +2199,10 @@ void LIR_Assembler::const2reg_helper(LIR_Opr src) {
  case T_OBJECT:
  case T_ARRAY:
  case T_METADATA:
- const2reg(src, FrameMap::t0_opr, lir_patch_none, NULL);
+ const2reg(src, FrameMap::t0_opr, lir_patch_none, nullptr);
  break;
  case T_LONG:
- const2reg(src, FrameMap::t0_long_opr, lir_patch_none, NULL);
+ const2reg(src, FrameMap::t0_long_opr, lir_patch_none, nullptr);
  break;
  case T_FLOAT:
  case T_DOUBLE:

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -62,7 +62,7 @@ private:
  void caswu(Register addr, Register newval, Register cmpval);
  void casl(Register addr, Register newval, Register cmpval);

- void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL);
+ void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = nullptr);

  void deoptimize_trap(CodeEmitInfo *info);

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -108,41 +108,41 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {


  bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
- if (v->type()->as_IntConstant() != NULL) {
+ if (v->type()->as_IntConstant() != nullptr) {
  return v->type()->as_IntConstant()->value() == 0;
- } else if (v->type()->as_LongConstant() != NULL) {
+ } else if (v->type()->as_LongConstant() != nullptr) {
  return v->type()->as_LongConstant()->value() == 0;
- } else if (v->type()->as_ObjectConstant() != NULL) {
+ } else if (v->type()->as_ObjectConstant() != nullptr) {
  return v->type()->as_ObjectConstant()->value()->is_null_object();
- } else if (v->type()->as_FloatConstant() != NULL) {
+ } else if (v->type()->as_FloatConstant() != nullptr) {
  return jint_cast(v->type()->as_FloatConstant()->value()) == 0.0f;
- } else if (v->type()->as_DoubleConstant() != NULL) {
+ } else if (v->type()->as_DoubleConstant() != nullptr) {
  return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0.0;
  }
  return false;
  }

  bool LIRGenerator::can_inline_as_constant(Value v) const {
- if (v->type()->as_IntConstant() != NULL) {
+ if (v->type()->as_IntConstant() != nullptr) {
  int value = v->type()->as_IntConstant()->value();
  // "-value" must be defined for value may be used for sub
  return Assembler::is_simm12(value) && Assembler::is_simm12(- value);
- } else if (v->type()->as_ObjectConstant() != NULL) {
+ } else if (v->type()->as_ObjectConstant() != nullptr) {
  return v->type()->as_ObjectConstant()->value()->is_null_object();
- } else if (v->type()->as_LongConstant() != NULL) {
+ } else if (v->type()->as_LongConstant() != nullptr) {
  long value = v->type()->as_LongConstant()->value();
  // "-value" must be defined for value may be used for sub
  return Assembler::is_simm12(value) && Assembler::is_simm12(- value);
- } else if (v->type()->as_FloatConstant() != NULL) {
+ } else if (v->type()->as_FloatConstant() != nullptr) {
  return v->type()->as_FloatConstant()->value() == 0.0f;
- } else if (v->type()->as_DoubleConstant() != NULL) {
+ } else if (v->type()->as_DoubleConstant() != nullptr) {
  return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
  }

  bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
- if (c->as_constant() != NULL) {
+ if (c->as_constant() != nullptr) {
  long constant = 0;
  switch (c->type()) {
  case T_INT: constant = c->as_jint(); break;
@@ -275,7 +275,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

- CodeEmitInfo* info_for_exception = NULL;
+ CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
  info_for_exception = state_for(x);
  }
@@ -419,7 +419,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  }
  }
  rlock_result(x);
- arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
+ arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
  }

@@ -464,9 +464,9 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  LIR_Opr ill = LIR_OprFact::illegalOpr;
  if (x->op() == Bytecodes::_irem) {
- __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
+ __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
  } else if (x->op() == Bytecodes::_idiv) {
- __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
+ __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr);
  }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
@@ -500,7 +500,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
- if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
+ if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
  x->swap_operands();
  }

@@ -522,7 +522,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  value.load_item();
  if (count.is_constant()) {
- assert(count.type()->as_IntConstant() != NULL || count.type()->as_LongConstant() != NULL , "should be");
+ assert(count.type()->as_IntConstant() != nullptr || count.type()->as_LongConstant() != nullptr , "should be");
  count.dont_load_item();
  } else {
  count.load_item();
@@ -672,7 +672,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

- CallingConvention* cc = NULL;
+ CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
  LIRItem value1(x->argument_at(1), this);
@@ -694,31 +694,31 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {

  switch (x->id()) {
  case vmIntrinsics::_dexp:
- if (StubRoutines::dexp() != NULL) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dexp() != nullptr) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dlog:
- if (StubRoutines::dlog() != NULL) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dlog() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dlog10:
- if (StubRoutines::dlog10() != NULL) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dlog10() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dsin:
- if (StubRoutines::dsin() != NULL) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dsin() != nullptr) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dcos:
- if (StubRoutines::dcos() != NULL) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dcos() != nullptr) { __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dtan:
- if (StubRoutines::dtan() != NULL) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dtan() != nullptr) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); }
  break;
  case vmIntrinsics::_dpow:
- if (StubRoutines::dpow() != NULL) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
+ if (StubRoutines::dpow() != nullptr) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); }
  else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); }
  break;
  default: ShouldNotReachHere();
@@ -762,7 +762,7 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  set_no_result(x);

  int flags;
- ciArrayKlass* expected_type = NULL;
+ ciArrayKlass* expected_type = nullptr;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
@@ -869,7 +869,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
- CodeEmitInfo* patching_info = NULL;
+ CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
  patching_info = state_for(x, x->state_before());
  }
@@ -902,14 +902,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
- LIRItemList* items = new LIRItemList(i, i, NULL);
+ LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
  LIRItem* size = new LIRItem(dims->at(i), this);
  items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
- CodeEmitInfo* patching_info = NULL;
+ CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
  patching_info = state_for(x, x->state_before());

@@ -956,7 +956,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

- CodeEmitInfo* patching_info = NULL;
+ CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() ||
  (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
  // must do this before locking the destination register as an oop register,
@@ -970,13 +970,13 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
  (x->needs_exception_state() ? state_for(x) :
  state_for(x, x->state_before(), true /*ignore_xhandler*/ ));

- CodeStub* stub = NULL;
+ CodeStub* stub = nullptr;
  if (x->is_incompatible_class_change_check()) {
- assert(patching_info == NULL, "can't patch this");
+ assert(patching_info == nullptr, "can't patch this");
  stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
  info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
- assert(patching_info == NULL, "can't patch this");
+ assert(patching_info == nullptr, "can't patch this");
  stub = new DeoptimizeStub(info_for_exception,
  Deoptimization::Reason_class_check,
  Deoptimization::Action_none);
@@ -999,7 +999,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
- CodeEmitInfo* patching_info = NULL;
+ CodeEmitInfo* patching_info = nullptr;
  if ((!x->klass()->is_loaded() || PatchALot)) {
  // must do this before locking the destination register as an oop register
  patching_info = state_for(x, x->state_before());

@@ -80,7 +80,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  la(t1, Address(obj, hdr_offset));
- cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthough*/NULL);
+ cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthough*/nullptr);
  // if the object header was the same, we're done
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
@@ -99,7 +99,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
  mv(t0, aligned_mask - (int)os::vm_page_size());
  andr(hdr, hdr, t0);
  // for recursive locking, the result is zero => save it in the displaced header
- // location (NULL in the displaced hdr location indicates recursive locking)
+ // location (null in the displaced hdr location indicates recursive locking)
  sd(hdr, Address(disp_hdr, 0));
  // otherwise we don't care about the result and handle locking via runtime call
  bnez(hdr, slow_case, /* is_far */ true);
@@ -117,7 +117,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_

  // load displaced header
  ld(hdr, Address(disp_hdr, 0));
- // if the loaded hdr is NULL we had recursive locking
+ // if the loaded hdr is null we had recursive locking
  // if we had recursive locking, we are done
  beqz(hdr, done);
  // load object
@@ -298,7 +298,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1

  void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
  verify_oop(receiver);
- // explicit NULL check not needed since load from [klass_offset] causes a trap
+ // explicit null check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  assert_different_registers(receiver, iCache, t0, t2);
@@ -314,7 +314,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
+ bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
  }

  void C1_MacroAssembler::remove_frame(int framesize) {
@@ -398,8 +398,8 @@ static c1_float_cond_branch_insn c1_float_cond_branch[] =
  (c1_float_cond_branch_insn)&MacroAssembler::float_ble,
  (c1_float_cond_branch_insn)&MacroAssembler::float_bge,
  (c1_float_cond_branch_insn)&MacroAssembler::float_bgt,
- NULL, // lir_cond_belowEqual
- NULL, // lir_cond_aboveEqual
+ nullptr, // lir_cond_belowEqual
+ nullptr, // lir_cond_aboveEqual

  /* DOUBLE branches */
  (c1_float_cond_branch_insn)&MacroAssembler::double_beq,
@@ -408,8 +408,8 @@ static c1_float_cond_branch_insn c1_float_cond_branch[] =
  (c1_float_cond_branch_insn)&MacroAssembler::double_ble,
  (c1_float_cond_branch_insn)&MacroAssembler::double_bge,
  (c1_float_cond_branch_insn)&MacroAssembler::double_bgt,
- NULL, // lir_cond_belowEqual
- NULL // lir_cond_aboveEqual
+ nullptr, // lir_cond_belowEqual
+ nullptr // lir_cond_aboveEqual
  };

  void C1_MacroAssembler::c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label,

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -106,7 +106,7 @@ using MacroAssembler::null_check;
  void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN;

  // This platform only uses signal-based null checks. The Label is not needed.
- void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
+ void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }

  void load_parameter(int offset_in_words, Register reg);

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -219,7 +219,7 @@ StubFrame::~StubFrame() {
  } else {
  __ should_not_reach_here();
  }
- _sasm = NULL;
+ _sasm = nullptr;
  }

  #undef __
@@ -259,7 +259,7 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
- assert_cond(oop_map != NULL);
+ assert_cond(oop_map != nullptr);

  // caller save registers only, see FrameMap::initialize
  // in c1_FrameMap_riscv.cpp for detail.
@@ -368,7 +368,7 @@ void Runtime1::initialize_pd() {
  OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
- assert_cond(oop_map != NULL);
+ assert_cond(oop_map != nullptr);
  int call_offset = 0;
  if (!has_argument) {
  call_offset = __ call_RT(noreg, noreg, target);
@@ -378,7 +378,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
  call_offset = __ call_RT(noreg, noreg, target);
  }
  OopMapSet* oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, oop_map);

  return oop_maps;
@@ -392,8 +392,8 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  const Register exception_pc = x13;

  OopMapSet* oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
- OopMap* oop_map = NULL;
+ assert_cond(oop_maps != nullptr);
+ OopMap* oop_map = nullptr;

  switch (id) {
  case forward_exception_id:
@@ -463,7 +463,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
- guarantee(oop_map != NULL, "NULL oop_map!");
+ guarantee(oop_map != nullptr, "null oop_map!");
  oop_maps->add_gc_map(call_offset, oop_map);

  // x10: handler address
@@ -561,10 +561,10 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Note: This number affects also the RT-Call in generate_handle_exception because
  // the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
- assert(deopt_blob != NULL, "deoptimization blob must have been created");
+ assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);
- assert_cond(oop_map != NULL);
+ assert_cond(oop_map != nullptr);

  __ mv(c_rarg0, xthread);
  Label retaddr;
@@ -578,7 +578,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  });
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
  #ifdef ASSERT
@@ -634,7 +634,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
- OopMapSet* oop_maps = NULL;
+ OopMapSet* oop_maps = nullptr;
  switch (id) {
  {
  case forward_exception_id:
@@ -676,10 +676,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  __ enter();
  OopMap* map = save_live_registers(sasm);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);
  int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers_except_r10(sasm);
  __ verify_oop(obj);
@@ -697,7 +697,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  Register method = x11;
  __ enter();
  OopMap* map = save_live_registers(sasm);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);

  const int bci_off = 0;
  const int method_off = 1;
@@ -707,7 +707,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  __ ld(method, Address(fp, method_off * BytesPerWord));
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers(sasm);
  __ leave();
@@ -746,7 +746,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  __ enter();
  OopMap* map = save_live_registers(sasm);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);
  int call_offset = 0;
  if (id == new_type_array_id) {
  call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
@@ -755,7 +755,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  }

  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers_except_r10(sasm);

@@ -774,14 +774,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  // x9: rank
  // x12: address of 1st dimension
  OopMap* map = save_live_registers(sasm);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);
  __ mv(c_rarg1, x10);
  __ mv(c_rarg3, x12);
  __ mv(c_rarg2, x9);
  int call_offset = __ call_RT(x10, noreg, CAST_FROM_FN_PTR(address, new_multi_array), x11, x12, x13);

  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers_except_r10(sasm);

@@ -810,10 +810,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  __ bind(register_finalizer);
  __ enter();
  OopMap* oop_map = save_live_registers(sasm);
- assert_cond(oop_map != NULL);
+ assert_cond(oop_map != nullptr);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), x10);
  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, oop_map);

  // Now restore all the live registers
@@ -864,7 +864,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  __ ld(x10, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // super klass

  Label miss;
- __ check_klass_subtype_slow_path(x14, x10, x12, x15, NULL, &miss);
+ __ check_klass_subtype_slow_path(x14, x10, x12, x15, nullptr, &miss);

  // fallthrough on success:
  __ mv(t0, 1);
@@ -886,7 +886,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  {
  StubFrame f(sasm, "monitorenter", dont_gc_arguments);
  OopMap* map = save_live_registers(sasm, save_fpu_registers);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);

  // Called with store_parameter and not C abi
  f.load_argument(1, x10); // x10: object
@@ -895,7 +895,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), x10, x11);

  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers(sasm, save_fpu_registers);
  }
@@ -908,7 +908,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  {
  StubFrame f(sasm, "monitorexit", dont_gc_arguments);
  OopMap* map = save_live_registers(sasm, save_fpu_registers);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);

  // Called with store_parameter and not C abi
  f.load_argument(0, x10); // x10: lock address
@@ -919,7 +919,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), x10);

  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers(sasm, save_fpu_registers);
  }
@@ -929,16 +929,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  {
  StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
  OopMap* oop_map = save_live_registers(sasm);
- assert_cond(oop_map != NULL);
+ assert_cond(oop_map != nullptr);
  f.load_argument(0, c_rarg1);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
- assert(deopt_blob != NULL, "deoptimization blob must have been created");
+ assert(deopt_blob != nullptr, "deoptimization blob must have been created");
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
  }
@@ -1028,16 +1028,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);

  OopMap* map = save_live_registers(sasm);
- assert_cond(map != NULL);
+ assert_cond(map != nullptr);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
  oop_maps = new OopMapSet();
- assert_cond(oop_maps != NULL);
+ assert_cond(oop_maps != nullptr);
  oop_maps->add_gc_map(call_offset, map);
  restore_live_registers(sasm);
  __ leave();
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
- assert(deopt_blob != NULL, "deoptimization blob must have been created");
+ assert(deopt_blob != nullptr, "deoptimization blob must have been created");

  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
  }

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -36,7 +36,7 @@ int C2SafepointPollStub::max_size() const {
|
||||
}
|
||||
|
||||
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
|
||||
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
|
||||
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
|
||||
"polling page return stub not created yet");
|
||||
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
|
||||
RuntimeAddress callback_addr(stub);
|
||||
|
@ -551,16 +551,16 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
|
||||
sub(t0, needle_len, 16); // small patterns still should be handled by simple algorithm
|
||||
bltz(t0, LINEARSEARCH);
|
||||
mv(result, zr);
|
||||
RuntimeAddress stub = NULL;
|
||||
RuntimeAddress stub = nullptr;
|
||||
if (isLL) {
|
||||
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ll());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_ll stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_ll stub has not been generated");
|
||||
} else if (needle_isL) {
|
||||
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_ul());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_ul stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_ul stub has not been generated");
|
||||
} else {
|
||||
stub = RuntimeAddress(StubRoutines::riscv::string_indexof_linear_uu());
|
||||
assert(stub.target() != NULL, "string_indexof_linear_uu stub has not been generated");
|
||||
assert(stub.target() != nullptr, "string_indexof_linear_uu stub has not been generated");
|
||||
}
|
||||
address call = trampoline_call(stub);
|
||||
if (call == nullptr) {
|
||||
@ -952,7 +952,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
}
|
||||
|
||||
bind(STUB);
|
||||
RuntimeAddress stub = NULL;
|
||||
RuntimeAddress stub = nullptr;
|
||||
switch (ae) {
|
||||
case StrIntrinsicNode::LL:
|
||||
stub = RuntimeAddress(StubRoutines::riscv::compare_long_string_LL());
|
||||
@ -969,7 +969,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
assert(stub.target() != NULL, "compare_long_string stub has not been generated");
|
||||
assert(stub.target() != nullptr, "compare_long_string stub has not been generated");
|
||||
address call = trampoline_call(stub);
|
||||
if (call == nullptr) {
|
||||
DEBUG_ONLY(reset_labels(DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START));
|
||||
@ -1211,21 +1211,21 @@ static conditional_branch_insn conditional_branches[] =
|
||||
/* SHORT branches */
|
||||
(conditional_branch_insn)&MacroAssembler::beq,
|
||||
(conditional_branch_insn)&MacroAssembler::bgt,
|
||||
NULL, // BoolTest::overflow
|
||||
nullptr, // BoolTest::overflow
|
||||
(conditional_branch_insn)&MacroAssembler::blt,
|
||||
(conditional_branch_insn)&MacroAssembler::bne,
|
||||
(conditional_branch_insn)&MacroAssembler::ble,
|
||||
NULL, // BoolTest::no_overflow
|
||||
nullptr, // BoolTest::no_overflow
|
||||
(conditional_branch_insn)&MacroAssembler::bge,
|
||||
|
||||
/* UNSIGNED branches */
|
||||
(conditional_branch_insn)&MacroAssembler::beq,
|
||||
(conditional_branch_insn)&MacroAssembler::bgtu,
|
||||
NULL,
|
||||
nullptr,
|
||||
(conditional_branch_insn)&MacroAssembler::bltu,
|
||||
(conditional_branch_insn)&MacroAssembler::bne,
|
||||
(conditional_branch_insn)&MacroAssembler::bleu,
|
||||
NULL,
|
||||
nullptr,
|
||||
(conditional_branch_insn)&MacroAssembler::bgeu
|
||||
};
|
||||
|
||||
@ -1234,21 +1234,21 @@ static float_conditional_branch_insn float_conditional_branches[] =
|
||||
/* FLOAT SHORT branches */
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_beq,
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_bgt,
|
||||
NULL, // BoolTest::overflow
|
||||
nullptr, // BoolTest::overflow
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_blt,
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_bne,
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_ble,
|
||||
NULL, // BoolTest::no_overflow
|
||||
nullptr, // BoolTest::no_overflow
|
||||
(float_conditional_branch_insn)&MacroAssembler::float_bge,
|
||||
|
||||
/* DOUBLE SHORT branches */
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_beq,
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_bgt,
|
||||
NULL,
|
||||
nullptr,
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_blt,
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_bne,
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_ble,
|
||||
NULL,
|
||||
nullptr,
|
||||
(float_conditional_branch_insn)&MacroAssembler::double_bge
|
||||
};
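
Both dispatch tables above deliberately keep nullptr in the slots for BoolTest::overflow and BoolTest::no_overflow, for which RISC-V has no single branch instruction, so any lookup has to guard the slot before calling through it. A hedged sketch of such a guarded dispatch (the helper is hypothetical, and the typedef is assumed to be a MacroAssembler member-function pointer taking (Register, Register, Label&, bool is_far)):

    // Hypothetical guarded dispatch through conditional_branches[].
    void emit_cond_branch(MacroAssembler* masm, int cmp_flag,
                          Register op1, Register op2, Label& target) {
      conditional_branch_insn insn = conditional_branches[cmp_flag];
      assert(insn != nullptr, "no RISC-V branch for this BoolTest condition");
      (masm->*insn)(op1, op2, target, /* is_far */ false);
    }
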
@ -1661,9 +1661,9 @@ void C2_MacroAssembler::reduce_minmax_FD_v(FloatRegister dst,
}

bool C2_MacroAssembler::in_scratch_emit_size() {
if (ciEnv::current()->task() != NULL) {
if (ciEnv::current()->task() != nullptr) {
PhaseOutput* phase_output = Compile::current()->output();
if (phase_output != NULL && phase_output->in_scratch_emit_size()) {
if (phase_output != nullptr && phase_output->in_scratch_emit_size()) {
return true;
}
}

@ -44,7 +44,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// mv xmethod, 0
// jalr -4 # to self

if (mark == NULL) {
if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
}

@ -54,8 +54,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)

address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset();
if (base == NULL) {
return NULL; // CodeBuffer::expand failed
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed
}
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark));
@ -86,7 +86,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {

void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
guarantee(stub != nullptr, "stub not found");

if (TraceICs) {
ResourceMark rm;
@ -114,7 +114,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(stub != nullptr, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeMovConstReg* method_holder
@ -135,7 +135,7 @@ void CompiledDirectStaticCall::verify() {

// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub);
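
Both set_to_interpreted and verify locate the out-of-line stub and treat its first instruction as a NativeMovConstReg holding the Method*. A minimal sketch of the patching step under that assumption (set_data is assumed to be the setter; the jump-destination update that follows in the real code is elided):

    // Sketch: repoint the stub's embedded Method* at the interpreted callee.
    address stub = find_stub();
    guarantee(stub != nullptr, "stub not found");
    NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
    method_holder->set_data((intptr_t)callee());
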
@ -147,7 +147,7 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
// so we compute locals "from scratch" rather than relativizing the value in the stack frame, which might include padding,
// since we don't freeze the padding word (see recurse_freeze_interpreted_frame).

// at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts
// at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts
*hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp();
// this line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197
*hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -40,7 +40,7 @@ static const char* pd_cpu_opts() {
// the perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) {
static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
return here;
}

@ -73,7 +73,7 @@ public:
_captured_state_mask(captured_state_mask),
_frame_complete(0),
_frame_size_slots(0),
_oop_maps(NULL) {
_oop_maps(nullptr) {
}

void generate();

@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// to construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code

if (_cb != NULL) {
if (_cb != nullptr) {

// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod
@ -119,10 +119,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return fp_safe && is_entry_frame_valid(thread);
}

intptr_t* sender_sp = NULL;
intptr_t* sender_unextended_sp = NULL;
address sender_pc = NULL;
intptr_t* saved_fp = NULL;
intptr_t* sender_sp = nullptr;
intptr_t* sender_unextended_sp = nullptr;
address sender_pc = nullptr;
intptr_t* saved_fp = nullptr;

if (is_interpreted_frame()) {
// fp must be safe
@ -182,7 +182,7 @@ bool frame::safe_for_sender(JavaThread *thread) {

// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
if (sender_pc == nullptr || sender_blob == nullptr) {
return false;
}

@ -212,7 +212,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}

CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) {
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
@ -250,7 +250,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}

// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ((address)this->fp()[return_addr_offset] == NULL) { return false; }
if ((address)this->fp()[return_addr_offset] == nullptr) { return false; }

return true;
}
@ -274,7 +274,7 @@ void frame::patch_pc(Thread* thread, address pc) {
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
_pc = original_pc;
@ -339,7 +339,7 @@ void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
}

frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@ -365,11 +365,11 @@ bool frame::upcall_stub_frame_is_first() const {
assert(is_upcall_stub_frame(), "must be optimzed entry frame");
UpcallStub* blob = _cb->as_upcall_stub();
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
return jfa->last_Java_sp() == NULL;
return jfa->last_Java_sp() == nullptr;
}

frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
UpcallStub* blob = _cb->as_upcall_stub();
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
@ -400,7 +400,7 @@ void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp
// method anyway.
fr._unextended_sp = unextended_sp;

assert_cond(nm != NULL);
assert_cond(nm != nullptr);
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
@ -415,9 +415,9 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.

if (_cb != NULL) {
if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) {
if (sender_cm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
@ -440,7 +440,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
if (map->update_map()) {
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
}
@ -460,10 +460,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
if (fp() == NULL || (intptr_t(fp()) & (wordSize-1)) != 0) {
if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
if (sp() == NULL || (intptr_t(sp()) & (wordSize-1)) != 0) {
if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
@ -522,7 +522,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
Method* method = interpreter_frame_method();
BasicType type = method->result_type();

intptr_t* tos_addr = NULL;
intptr_t* tos_addr = nullptr;
if (method->is_native()) {
tos_addr = (intptr_t*)sp();
if (type == T_FLOAT || type == T_DOUBLE) {
@ -541,7 +541,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
} else {
oop* obj_p = (oop*)tos_addr;
obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
}
assert(Universe::is_in_heap_or_null(obj), "sanity check");
*oop_result = obj;
@ -610,7 +610,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {

intptr_t *frame::initial_deoptimization_info() {
// Not used on riscv, but we must return something.
return NULL;
return nullptr;
}

#undef DESCRIBE_FP_OFFSET
@ -625,11 +625,11 @@ frame::frame(void* ptr_sp, void* ptr_fp, void* pc) : _on_heap(false) {

void JavaFrameAnchor::make_walkable() {
// last frame set?
if (last_Java_sp() == NULL) { return; }
if (last_Java_sp() == nullptr) { return; }
// already walkable?
if (walkable()) { return; }
vmassert(last_Java_sp() != NULL, "not called from Java code?");
vmassert(last_Java_pc() == NULL, "already walkable");
vmassert(last_Java_sp() != nullptr, "not called from Java code?");
vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong");
}

@ -39,11 +39,11 @@
// Constructors:

inline frame::frame() {
_pc = NULL;
_sp = NULL;
_unextended_sp = NULL;
_fp = NULL;
_cb = NULL;
_pc = nullptr;
_sp = nullptr;
_unextended_sp = nullptr;
_fp = nullptr;
_cb = nullptr;
_deopt_state = unknown;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
@ -58,11 +58,11 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
_unextended_sp = ptr_sp;
_fp = ptr_fp;
_pc = pc;
_oop_map = NULL;
_oop_map = nullptr;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)

assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc);
setup(pc);
}
@ -71,10 +71,10 @@ inline void frame::setup(address pc) {
adjust_unextended_sp();

address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -96,10 +96,10 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,
_unextended_sp = unextended_sp;
_fp = ptr_fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = cb;
_oop_map = NULL;
assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
_oop_map = nullptr;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
setup(pc);
@ -119,7 +119,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,

// In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
if (cb != NULL) {
if (cb != nullptr) {
setup(pc);
}
#ifdef ASSERT
@ -138,10 +138,10 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp,
_unextended_sp = unextended_sp;
_fp = ptr_fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob_fast(pc);
assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(ptr_sp), p2i(unextended_sp), p2i(ptr_fp));
_oop_map = NULL;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(ptr_sp), p2i(unextended_sp), p2i(ptr_fp));
_oop_map = nullptr;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)

@ -172,7 +172,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
adjust_unextended_sp();

address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
@ -192,19 +192,19 @@ inline bool frame::equal(frame other) const {
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; }

inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL;
return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
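
The bare ">" in is_older works because a frame's id is its unextended sp and the stack grows toward lower addresses, so the earlier (older) activation always holds the numerically larger pointer. A self-contained illustration of that invariant:

    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t stack[16];
      intptr_t* older_sp   = &stack[12];  // pushed first, higher address
      intptr_t* younger_sp = &stack[4];   // pushed later, lower address
      assert(older_sp > younger_sp);      // the exact test frame::is_older uses
      return 0;
    }
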
@ -213,7 +213,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); return
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
if (_cb != NULL) {
if (_cb != nullptr) {
// use the frame size if valid
int size = _cb->frame_size();
if (size > 0) {
@ -237,7 +237,7 @@ inline int frame::compiled_frame_stack_argsize() const {
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
assert(mask != NULL, "");
assert(mask != nullptr, "");
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
@ -287,7 +287,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL) {
if (last_sp == nullptr) {
return sp();
} else {
// sp() may have been extended or shrunk by an adapter. At least
@ -326,13 +326,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
// Compiled frames
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
return (*result_adr);
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj;
}

@ -345,17 +345,17 @@ inline int frame::sender_sp_ret_address_offset() {
}

inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;
if (_cb->oop_maps() != NULL) {
if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) {
if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
return NULL;
return nullptr;
}
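
Judging from the lookup above, the post-call nop packs an oop-map slot index into the top byte of its 32-bit displacement; what the remaining 24 bits encode is not visible here, so this sketch only mirrors the decode that is shown:

    #include <cstdint>

    // Mirrors "int slot = ((nop->displacement() >> 24) & 0xff)" above.
    int oop_map_slot_from_displacement(int32_t displacement) {
      return (displacement >> 24) & 0xff;  // top byte selects the oop-map slot
    }
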
//------------------------------------------------------------------------------
@ -375,7 +375,7 @@ frame frame::sender(RegisterMap* map) const {
frame frame::sender_raw(RegisterMap* map) const {
// Default is we done have to follow them. The sender_for_xxx will
// update it accordingly
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
map->set_include_argument_oops(false);

if (map->in_cont()) { // already in an h-stack
@ -393,7 +393,7 @@ frame frame::sender_raw(RegisterMap* map) const {
}

assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) {
if (_cb != nullptr) {
return sender_for_compiled_frame(map);
}

@ -421,20 +421,20 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {

intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp + frame::link_offset);

assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != NULL) {
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);
}
} else {
assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), "");
assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
}

// Since the prolog does the save and restore of FP there is no
@ -460,7 +460,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// frame::update_map_with_saved_link
template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// The interpreter and compiler(s) always save FP in a known
// location on entry. C2-compiled code uses FP as an allocatable
// callee-saved register. We must record where that location is so

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -198,11 +198,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ srli(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ beqz(tmp1, done);

// crosses regions, storing NULL?
// crosses regions, storing null?

__ beqz(new_val, done);

// storing region crossing non-NULL, is card already dirty?
// storing region crossing non-null, is card already dirty?

ExternalAddress cardtable((address) ct->byte_map_base());
const Register card_addr = tmp1;
@ -223,7 +223,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ lbu(tmp2, Address(card_addr));
__ beqz(tmp2, done);

// storing a region crossing, non-NULL oop, card is clean.
// storing a region crossing, non-null oop, card is clean.
// dirty card and log.

__ sb(zr, Address(card_addr));
@ -415,7 +415,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Label done;
Label runtime;

// At this point we know new_value is non-NULL and the new_value crosses regions.
// At this point we know new_value is non-null and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = xthread;

@ -446,7 +446,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ lbu(t0, Address(card_addr, 0));
__ beqz(t0, done);

// storing region crossing non-NULL, card is clean.
// storing region crossing non-null, card is clean.
// dirty card and log.
__ sb(zr, Address(card_addr, 0));
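
In this barrier a zero byte marks a dirty card: the lbu/beqz pair skips cards that are already dirty, and "sb zr" dirties the card by storing zero. A C-level sketch of the same filtering, assuming dirty == 0 as the stores above imply (the enqueue function is a stand-in for the runtime call on the slow path):

    #include <cstdint>

    static void enqueue_dirty_card(volatile uint8_t*) { /* stand-in for the runtime call */ }

    void post_barrier_card_filter(volatile uint8_t* card_addr) {
      if (*card_addr == 0) {  // zero byte == already dirty, nothing to log
        return;
      }
      *card_addr = 0;         // dirty the card, like "sb zr" above
      enqueue_dirty_card(card_addr);
    }
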
@ -240,7 +240,7 @@ void BarrierSetAssembler::clear_patching_epoch() {
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

if (bs_nm == NULL) {
if (bs_nm == nullptr) {
return;
}

@ -249,7 +249,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
Label local_guard;
NMethodPatchingType patching_type = nmethod_patching_type();

if (slow_path == NULL) {
if (slow_path == nullptr) {
guard = &local_guard;

// RISCV atomic operations require that the memory address be naturally aligned.
@ -304,7 +304,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
ShouldNotReachHere();
}

if (slow_path == NULL) {
if (slow_path == nullptr) {
Label skip_barrier;
__ beq(t0, t1, skip_barrier);

@ -327,7 +327,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) {
if (bs == nullptr) {
return;
}

@ -158,7 +158,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,

// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssember::call_VM_leaf_base)
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
// that checks that the *(rfp+frame::interpreter_frame_last_sp) is null.
//
// If we care generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to
@ -282,7 +282,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
}

__ push_call_clobbered_registers();
address target = NULL;
address target = nullptr;
if (is_strong) {
if (is_narrow) {
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
@ -467,7 +467,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
// b) A parallel thread may heal the contents of addr, replacing a
// from-space pointer held in addr with the to-space pointer
// representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
@ -680,7 +680,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
address target = NULL;
address target = nullptr;
if (is_strong) {
if (is_native) {
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);

@ -34,7 +34,7 @@

define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls past to check cast

define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -43,7 +43,7 @@ int InlineCacheBuffer::ic_stub_code_size() {
#define __ masm->

void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
assert_cond(code_begin != NULL && entry_point != NULL);
assert_cond(code_begin != nullptr && entry_point != nullptr);
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);

@ -85,7 +85,7 @@ void InterpreterMacroAssembler::narrow(Register result) {
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry != NULL, "Entry must have been generated by now");
assert(entry != nullptr, "Entry must have been generated by now");
j(entry);
}

@ -156,7 +156,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
if (JvmtiExport::can_force_early_return()) {
Label L;
ld(t0, Address(xthread, JavaThread::jvmti_thread_state_offset()));
beqz(t0, L); // if [thread->jvmti_thread_state() == NULL] then exit
beqz(t0, L); // if thread->jvmti_thread_state() is null then exit

// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
@ -819,7 +819,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
assert(lock_offset == 0,
"displached header must be first word in BasicObjectLock");

cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);

// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 7) == 0, and
@ -899,7 +899,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
beqz(header_reg, count);

// Atomic swap back the old header
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/nullptr);

// Call the runtime routine for slow case.
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@ -930,7 +930,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Label set_mdp;
push_reg(RegSet::of(x10, x11), sp); // save x10, x11

// Test MDO to avoid the call if it is NULL.
// Test MDO to avoid the call if it is null.
ld(x10, Address(xmethod, in_bytes(Method::method_data_offset())));
beqz(x10, set_mdp);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), xmethod, xbcp);
@ -1301,7 +1301,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(
}

// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is NULL.
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
@ -1319,19 +1319,19 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(
// row[0].incr()
// goto done
// ]
// if (row[0].rec != NULL) then [
// if (row[0].rec != nullptr) then [
// # inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) then [
// row[1].incr()
// goto done
// ]
// if (row[1].rec != NULL) then [
// if (row[1].rec != nullptr) then [
// # degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) then [
// row[2].incr()
// goto done
// ]
// if (row[2].rec != NULL) then [
// if (row[2].rec != nullptr) then [
// count.incr()
// goto done
// ] # overflow
@ -1613,7 +1613,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
beqz(t0, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL");
" last_sp isn't null");
bind(L);
}
#endif /* ASSERT */
@ -1640,7 +1640,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
beqz(t0, L);
stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL");
" last_sp isn't null");
bind(L);
}
#endif /* ASSERT */

@ -164,7 +164,7 @@ class InterpreterMacroAssembler: public MacroAssembler {

void empty_expression_stack() {
ld(esp, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
// null last_sp until next java call
sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -227,7 +227,7 @@ class SlowSignatureHandler

virtual void pass_object() {
intptr_t* addr = single_slot_addr();
intptr_t value = *addr == 0 ? NULL : (intptr_t)addr;
intptr_t value = *addr == 0 ? (intptr_t)nullptr : (intptr_t)addr;
if (pass_gpr(value) < 0) {
pass_stack(value);
}
@ -269,11 +269,11 @@ class SlowSignatureHandler

~SlowSignatureHandler()
{
_from = NULL;
_to = NULL;
_int_args = NULL;
_fp_args = NULL;
_fp_identifiers = NULL;
_from = nullptr;
_to = nullptr;
_int_args = nullptr;
_fp_args = nullptr;
_fp_identifiers = nullptr;
}
};
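
pass_object above encodes the JNI convention for object arguments: native code receives a handle, i.e. the address of the stack slot holding the oop, except that a null oop is passed as a plain null rather than as a pointer to a null slot. The same logic in isolation:

    #include <cstdint>

    intptr_t to_jni_object_argument(intptr_t* slot_addr) {
      return (*slot_addr == 0) ? (intptr_t)nullptr : (intptr_t)slot_addr;
    }
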
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,10 +40,10 @@ public:

void clear(void) {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
OrderAccess::release();
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_last_Java_fp = nullptr;
_last_Java_pc = nullptr;
}

void copy(JavaFrameAnchor* src) {
@ -51,12 +51,12 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data
//
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing
//
assert(src != NULL, "Src should not be NULL.");
assert(src != nullptr, "Src should not be null.");
if (_last_Java_sp != src->_last_Java_sp) {
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
OrderAccess::release();
}
_last_Java_fp = src->_last_Java_fp;
@ -65,7 +65,7 @@ public:
_last_Java_sp = src->_last_Java_sp;
}

bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; }

void make_walkable();
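
Note the ordering discipline in clear and copy: _last_Java_sp is nulled first and published before the other fields are touched, so a stack walker that observes a non-null sp never sees it paired with stale fp/pc; walkable is the reader-side gate on exactly those fields. A sketch of the writer protocol, with a standard C++ fence standing in for OrderAccess::release():

    #include <atomic>
    #include <cstdint>

    struct AnchorSketch {
      intptr_t* volatile last_sp;
      intptr_t* last_fp;
      intptr_t* last_pc;

      void clear() {
        last_sp = nullptr;  // invalidate first, as the comment above demands
        std::atomic_thread_fence(std::memory_order_release);
        last_fp = nullptr;  // readers gated on last_sp never see these half-done
        last_pc = nullptr;
      }
    };
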
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@ -65,7 +65,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
|
||||
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
|
||||
default: ShouldNotReachHere();
|
||||
name = NULL; // unreachable
|
||||
name = nullptr; // unreachable
|
||||
}
|
||||
ResourceMark rm;
|
||||
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
|
||||
@ -112,7 +112,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
|
||||
// Both robj and t0 are clobbered by try_resolve_jobject_in_native.
|
||||
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
assert_cond(bs != NULL);
|
||||
assert_cond(bs != nullptr);
|
||||
bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, t0, slow);
|
||||
|
||||
__ srli(roffset, c_rarg2, 2); // offset
|
||||
@ -168,7 +168,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
|
||||
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
|
||||
default: ShouldNotReachHere();
|
||||
slow_case_addr = NULL; // unreachable
|
||||
slow_case_addr = nullptr; // unreachable
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -255,7 +255,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
|
||||
Register last_java_fp,
|
||||
address last_java_pc,
|
||||
Register tmp) {
|
||||
assert(last_java_pc != NULL, "must provide a valid PC");
|
||||
assert(last_java_pc != nullptr, "must provide a valid PC");
|
||||
|
||||
la(tmp, last_java_pc);
|
||||
sd(tmp, Address(xthread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
|
||||
@ -364,13 +364,13 @@ void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thr
|
||||
}
|
||||
|
||||
void MacroAssembler::clinit_barrier(Register klass, Register tmp, Label* L_fast_path, Label* L_slow_path) {
|
||||
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
|
||||
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
|
||||
assert_different_registers(klass, xthread, tmp);
|
||||
|
||||
Label L_fallthrough, L_tmp;
|
||||
if (L_fast_path == NULL) {
|
||||
if (L_fast_path == nullptr) {
|
||||
L_fast_path = &L_fallthrough;
|
||||
} else if (L_slow_path == NULL) {
|
||||
} else if (L_slow_path == nullptr) {
|
||||
L_slow_path = &L_fallthrough;
|
||||
}
|
||||
|
||||
@ -397,7 +397,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
|
||||
if (!VerifyOops) { return; }
|
||||
|
||||
// Pass register number to verify_oop_subroutine
|
||||
const char* b = NULL;
|
||||
const char* b = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -436,7 +436,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
|
||||
return;
|
||||
}
|
||||
|
||||
const char* b = NULL;
|
||||
const char* b = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -560,7 +560,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
|
||||
assert_different_registers(value, tmp1, tmp2);
|
||||
Label done, tagged, weak_tagged;
|
||||
|
||||
beqz(value, done); // Use NULL as-is.
|
||||
beqz(value, done); // Use null as-is.
|
||||
// Test for tag.
|
||||
andi(t0, value, JNIHandles::tag_mask);
|
||||
bnez(t0, tagged);
|
||||
@ -593,7 +593,7 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis
|
||||
assert_different_registers(value, tmp1, tmp2);
|
||||
Label done;
|
||||
|
||||
beqz(value, done); // Use NULL as-is.
|
||||
beqz(value, done); // Use null as-is.
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
@ -620,7 +620,7 @@ void MacroAssembler::stop(const char* msg) {
|
||||
}
|
||||
|
||||
void MacroAssembler::unimplemented(const char* what) {
|
||||
const char* buf = NULL;
|
||||
const char* buf = nullptr;
|
||||
{
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
@ -635,7 +635,7 @@ void MacroAssembler::emit_static_call_stub() {
|
||||
// CompiledDirectStaticCall::set_to_interpreted knows the
|
||||
// exact layout of this stub.
|
||||
|
||||
mov_metadata(xmethod, (Metadata*)NULL);
|
||||
mov_metadata(xmethod, (Metadata*)nullptr);
|
||||
|
||||
// Jump to the entry point of the c2i stub.
|
||||
int32_t offset = 0;
|
||||
@ -648,7 +648,7 @@ void MacroAssembler::call_VM_leaf_base(address entry_point,
|
||||
Label *retaddr) {
|
||||
push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp
|
||||
call(entry_point);
|
||||
if (retaddr != NULL) {
|
||||
if (retaddr != nullptr) {
|
||||
bind(*retaddr);
|
||||
}
|
||||
pop_reg(RegSet::of(t0, xmethod), sp); // pop << t0 & xmethod >> from sp
|
||||
@ -833,7 +833,7 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
|
||||
|
||||
#define INSN(NAME, REGISTER) \
|
||||
void MacroAssembler::NAME(const address dest, Register temp) { \
|
||||
assert_cond(dest != NULL); \
|
||||
assert_cond(dest != nullptr); \
|
||||
int64_t distance = dest - pc(); \
|
||||
if (is_simm21(distance) && ((distance % 2) == 0)) { \
|
||||
Assembler::jal(REGISTER, distance); \
|
||||
@ -877,7 +877,7 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
|
||||
|
||||
#define INSN(NAME) \
|
||||
void MacroAssembler::NAME(Register Rd, const address dest, Register temp) { \
|
||||
assert_cond(dest != NULL); \
|
||||
assert_cond(dest != nullptr); \
|
||||
int64_t distance = dest - pc(); \
|
||||
if (is_simm21(distance) && ((distance % 2) == 0)) { \
|
||||
Assembler::NAME(Rd, distance); \
|
||||
@ -1415,7 +1415,7 @@ int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
|
||||
}
|
||||
|
||||
static long get_offset_of_jal(address insn_addr) {
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
long offset = 0;
|
||||
unsigned insn = *(unsigned*)insn_addr;
|
||||
long val = (long)Assembler::sextract(insn, 31, 12);
|
||||
@ -1429,7 +1429,7 @@ static long get_offset_of_jal(address insn_addr) {
|
||||
|
||||
static long get_offset_of_conditional_branch(address insn_addr) {
|
||||
long offset = 0;
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
unsigned insn = *(unsigned*)insn_addr;
|
||||
offset = (long)Assembler::sextract(insn, 31, 31);
|
||||
offset = (offset << 12) | (((long)(Assembler::sextract(insn, 7, 7) & 0x1)) << 11);
|
||||
@ -1441,7 +1441,7 @@ static long get_offset_of_conditional_branch(address insn_addr) {
|
||||
|
||||
static long get_offset_of_pc_relative(address insn_addr) {
|
||||
long offset = 0;
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
offset = ((long)(Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12))) << 12; // Auipc.
|
||||
offset += ((long)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addi/Jalr/Load.
|
||||
offset = (offset << 32) >> 32;
|
||||
@ -1449,7 +1449,7 @@ static long get_offset_of_pc_relative(address insn_addr) {
|
||||
}
|
||||
|
||||
static address get_target_of_movptr(address insn_addr) {
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 29; // Lui.
|
||||
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 17; // Addi.
|
||||
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 6; // Addi.
|
||||
@ -1458,7 +1458,7 @@ static address get_target_of_movptr(address insn_addr) {
|
||||
}
|
||||
|
||||
static address get_target_of_li64(address insn_addr) {
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 44; // Lui.
|
||||
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)) << 32; // Addi.
|
||||
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[3], 31, 20)) << 20; // Addi.
|
||||
@ -1468,7 +1468,7 @@ static address get_target_of_li64(address insn_addr) {
|
||||
}
|
||||
|
||||
address MacroAssembler::get_target_of_li32(address insn_addr) {
|
||||
assert_cond(insn_addr != NULL);
|
||||
assert_cond(insn_addr != nullptr);
|
||||
intptr_t target_address = (((int64_t)Assembler::sextract(((unsigned*)insn_addr)[0], 31, 12)) & 0xfffff) << 12; // Lui.
|
||||
target_address += ((int64_t)Assembler::sextract(((unsigned*)insn_addr)[1], 31, 20)); // Addiw.
|
||||
return (address)target_address;
|
||||
@ -1477,7 +1477,7 @@ address MacroAssembler::get_target_of_li32(address insn_addr) {
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
assert_cond(branch != NULL);
assert_cond(branch != nullptr);
int64_t offset = target - branch;
if (NativeInstruction::is_jal_at(branch)) { // jal
return patch_offset_in_jal(branch, offset);
@ -1505,7 +1505,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {

address MacroAssembler::target_addr_for_insn(address insn_addr) {
long offset = 0;
assert_cond(insn_addr != NULL);
assert_cond(insn_addr != nullptr);
if (NativeInstruction::is_jal_at(insn_addr)) { // jal
offset = get_offset_of_jal(insn_addr);
} else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
@ -1879,7 +1879,7 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1
assert_different_registers(oop, trial_klass, tmp1, tmp2);
if (UseCompressedClassPointers) {
lwu(tmp1, Address(oop, oopDesc::klass_offset_in_bytes()));
if (CompressedKlassPointers::base() == NULL) {
if (CompressedKlassPointers::base() == nullptr) {
slli(tmp1, tmp1, CompressedKlassPointers::shift());
beq(trial_klass, tmp1, L);
return;
@ -1894,7 +1894,7 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1
// Move an oop into a register.
void MacroAssembler::movoop(Register dst, jobject obj) {
int oop_index;
if (obj == NULL) {
if (obj == nullptr) {
oop_index = oop_recorder()->allocate_oop_index(obj);
} else {
#ifdef ASSERT
@ -1918,7 +1918,7 @@ void MacroAssembler::movoop(Register dst, jobject obj) {
// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
int oop_index;
if (obj == NULL) {
if (obj == nullptr) {
oop_index = oop_recorder()->allocate_metadata_index(obj);
} else {
oop_index = oop_recorder()->find_index(obj);
@ -1974,7 +1974,7 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value

SkipIfEqual::~SkipIfEqual() {
_masm->bind(_label);
_masm = NULL;
_masm = nullptr;
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
@ -2023,13 +2023,13 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,

void MacroAssembler::null_check(Register reg, int offset) {
if (needs_explicit_null_check(offset)) {
// provoke OS NULL exception if reg = NULL by
// provoke OS null exception if reg is null by
// accessing M[reg] w/o changing any registers
// NOTE: this is plenty to provoke a segv
ld(zr, Address(reg, 0));
} else {
// nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL
// will provoke OS null exception if reg is null
}
}
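
The trick here is the implicit null check: a simplified sketch of the decision null_check() relies on (hypothetical helper; the real needs_explicit_null_check also handles negative offsets used by compressed-klass decoding):

    #include <cstdint>

    // If the known offset lands inside the protected page at address
    // zero, the later access M[reg + offset] itself faults when reg is
    // null, so no extra code is needed; otherwise an explicit load from
    // M[reg] (the ld above) forces the fault up front.
    static bool needs_explicit_null_check_sketch(intptr_t offset,
                                                 uintptr_t page_size) {
      return offset < 0 || (uintptr_t)offset >= page_size;
    }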

@ -2049,7 +2049,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
// Algorithm must match CompressedOops::encode.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
verify_oop_msg(s, "broken oop in encode_heap_oop");
if (CompressedOops::base() == NULL) {
if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
srli(d, s, LogMinObjAlignmentInBytes);
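
For reference, the compressed-oops arithmetic these encode/decode routines emit, written out as a plain C++ sketch (base and shift stand in for CompressedOops::base()/shift(); encode must mirror CompressedOops::encode):

    #include <cstdint>

    // Zero-based mode (base == 0) reduces to the shift alone; otherwise
    // the heap base is subtracted before shifting.
    static uint32_t encode_oop(uintptr_t oop, uintptr_t base, int shift) {
      return (uint32_t)((oop - base) >> shift);
    }

    static uintptr_t decode_oop(uint32_t narrow, uintptr_t base, int shift) {
      return base + ((uintptr_t)narrow << shift);
    }
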
@ -2106,7 +2106,7 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers");

if (CompressedKlassPointers::base() == NULL) {
if (CompressedKlassPointers::base() == nullptr) {
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
slli(dst, src, LogKlassAlignmentInBytes);
@ -2141,7 +2141,7 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers");

if (CompressedKlassPointers::base() == NULL) {
if (CompressedKlassPointers::base() == nullptr) {
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
srli(dst, src, LogKlassAlignmentInBytes);
@ -2177,24 +2177,24 @@ void MacroAssembler::decode_heap_oop_not_null(Register r) {

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
assert(UseCompressedOops, "should only be used for compressed headers");
assert(Universe::heap() != NULL, "java heap should be initialized");
assert(Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (CompressedOops::shift() != 0) {
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
slli(dst, src, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) {
if (CompressedOops::base() != nullptr) {
add(dst, xheapbase, dst);
}
} else {
assert(CompressedOops::base() == NULL, "sanity");
assert(CompressedOops::base() == nullptr, "sanity");
mv(dst, src);
}
}

void MacroAssembler::decode_heap_oop(Register d, Register s) {
if (CompressedOops::base() == NULL) {
if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0 || d != s) {
slli(d, s, CompressedOops::shift());
}
@ -2223,7 +2223,7 @@ void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register
access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL, dst, src, tmp1, tmp2);
}

// Used for storing NULLs.
// Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) {
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}
@ -2366,7 +2366,7 @@ void MacroAssembler::membar(uint32_t order_constraint) {
address prev = pc() - NativeMembar::instruction_size;
address last = code()->last_insn();

if (last != NULL && nativeInstruction_at(last)->is_membar() && prev == last) {
if (last != nullptr && nativeInstruction_at(last)->is_membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
// We are merging two memory barrier instructions. On RISCV we
// can do this simply by ORing them together.
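
This works because a RISC-V fence encodes its predecessor set in bits [27:24] and its successor set in bits [23:20] of the instruction word; for two plain fences every other field is identical, so ORing the words unions the two barriers. Illustrative sketch (the real code goes through the NativeMembar accessors):

    #include <cstdint>

    // Merge two adjacent fence instructions into one.
    static uint32_t merge_membars(uint32_t fence_a, uint32_t fence_b) {
      return fence_a | fence_b; // ORs the pred/succ bit sets together
    }
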
@ -2405,8 +2405,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register tmp_reg,
Label& L_success) {
Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, tmp_reg, &L_success, &L_failure, NULL);
check_klass_subtype_slow_path(sub_klass, super_klass, tmp_reg, noreg, &L_success, NULL);
check_klass_subtype_fast_path(sub_klass, super_klass, tmp_reg, &L_success, &L_failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass, tmp_reg, noreg, &L_success, nullptr);
bind(L_failure);
}

@ -2443,7 +2443,7 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
bind(nope);
membar(AnyAny);
mv(oldv, tmp);
if (fail != NULL) {
if (fail != nullptr) {
j(*fail);
}
}
@ -2707,7 +2707,7 @@ ATOMIC_XCHGU(xchgalwu, xchgalw)

void MacroAssembler::far_jump(Address entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != NULL,
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type
@ -2728,7 +2728,7 @@ void MacroAssembler::far_jump(Address entry, Register tmp) {

void MacroAssembler::far_call(Address entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != NULL,
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type
@ -2764,10 +2764,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,

Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in batch");

int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
@ -2850,10 +2850,10 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,

Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }

assert(label_nulls <= 1, "at most one NULL in the batch");
assert(label_nulls <= 1, "at most one null in the batch");

// A couple of useful fields in sub_klass:
int ss_offset = in_bytes(Klass::secondary_supers_offset());
@ -3056,8 +3056,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
{
ThreadInVMfromUnknown tiv;
assert (UseCompressedOops, "should only be used for compressed oops");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
}
#endif
@ -3070,7 +3070,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
assert(!Universe::heap()->is_in(k), "should not be an oop");

@ -3099,9 +3099,9 @@ address MacroAssembler::trampoline_call(Address entry) {
code()->share_trampoline_for(entry.target(), offset());
} else {
address stub = emit_trampoline_stub(offset(), target);
if (stub == NULL) {
if (stub == nullptr) {
postcond(pc() == badAddress);
return NULL; // CodeCache is full
return nullptr; // CodeCache is full
}
}
}
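
The trampoline machinery exists because a direct jal only reaches so far. A sketch of the underlying reachability rule, assuming the +/-1 MiB range of a RISC-V jal (21-bit signed, 2-byte-aligned offset); far targets are routed through a stub that holds the full 64-bit destination:

    #include <cstdint>

    static bool reachable_from_branch(uintptr_t branch_pc, uintptr_t target) {
      int64_t distance = (int64_t)(target - branch_pc);
      return distance >= -(1LL << 20) && distance < (1LL << 20);
    }
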
@ -3126,7 +3126,7 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
IncompressibleRegion ir(this); // relocations
movptr(t1, (address)Universe::non_oop_word());
assert_cond(entry != NULL);
assert_cond(entry != nullptr);
return trampoline_call(Address(entry, rh));
}

@ -3145,8 +3145,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
address dest) {
// Max stub size: alignment nop, TrampolineStub.
address stub = start_a_stub(max_trampoline_stub_size());
if (stub == NULL) {
return NULL; // CodeBuffer::expand failed
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed
}

// We are always 4-byte aligned here.
@ -3895,13 +3895,13 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
bltu(cnt, t0, around);
{
RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::riscv::zero_blocks());
assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
if (StubRoutines::riscv::complete()) {
address tpc = trampoline_call(zero_blocks);
if (tpc == NULL) {
if (tpc == nullptr) {
DEBUG_ONLY(reset_labels(around));
postcond(pc() == badAddress);
return NULL;
return nullptr;
}
} else {
jal(zero_blocks);
@ -4319,12 +4319,12 @@ void MacroAssembler::object_move(OopMap* map,
VMRegPair dst,
bool is_receiver,
int* receiver_offset) {
assert_cond(map != NULL && receiver_offset != NULL);
assert_cond(map != nullptr && receiver_offset != nullptr);

// must pass a handle. First figure out the location we use as a handle
Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register();

// See if oop is NULL if it is we need no handle
// See if oop is null if it is we need no handle

if (src.first()->is_stack()) {
// Oop is already on the stack as an argument
@ -4336,7 +4336,7 @@ void MacroAssembler::object_move(OopMap* map,

ld(t0, Address(fp, reg2offset_in(src.first())));
la(rHandle, Address(fp, reg2offset_in(src.first())));
// conditionally move a NULL
// conditionally move a null
Label notZero1;
bnez(t0, notZero1);
mv(rHandle, zr);
@ -4344,7 +4344,7 @@ void MacroAssembler::object_move(OopMap* map,
} else {

// Oop is in a register we must store it to the space we reserve
// on the stack for oop_handles and pass a handle if oop is non-NULL
// on the stack for oop_handles and pass a handle if oop is non-null

const Register rOop = src.first()->as_Register();
int oop_slot = -1;
@ -4371,7 +4371,7 @@ void MacroAssembler::object_move(OopMap* map,
int offset = oop_slot * VMRegImpl::stack_slot_size;

map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL
// Store oop in handle area, may be null
sd(rOop, Address(sp, offset));
if (is_receiver) {
*receiver_offset = offset;

@ -154,7 +154,7 @@ class MacroAssembler: public Assembler {
virtual void call_VM_leaf_base(
address entry_point, // the entry point
int number_of_arguments, // the number of arguments to pop after the call
Label* retaddr = NULL
Label* retaddr = nullptr
);

virtual void call_VM_leaf_base(
@ -218,19 +218,19 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register dst, Register src);

// currently unimplemented
// Used for storing NULL. All other oop constants should be
// Used for storing null. All other oop constants should be
// stored using routines that take a jobject.
void store_heap_oop_null(Address dst);

// This dummy is to prevent a call to store_heap_oop from
// converting a zero (linked NULL) into a Register by giving
// converting a zero (linked null) into a Register by giving
// the compiler two choices it can't resolve

void store_heap_oop(Address dst, void* dummy);

// Support for NULL-checks
// Support for null-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
@ -291,7 +291,7 @@ class MacroAssembler: public Assembler {
// Test sub_klass against super_klass, with fast and slow paths.

// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except tmp_reg
void check_klass_subtype_fast_path(Register sub_klass,
@ -394,7 +394,7 @@ class MacroAssembler: public Assembler {
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
static int pd_patch_instruction_size(address branch, address target);
static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
pd_patch_instruction_size(branch, target);
}
static address pd_call_destination(address branch) {
@ -589,7 +589,7 @@ class MacroAssembler: public Assembler {

#define INSN(NAME) \
void NAME(Register Rs1, Register Rs2, const address dest) { \
assert_cond(dest != NULL); \
assert_cond(dest != nullptr); \
int64_t offset = dest - pc(); \
guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \
Assembler::NAME(Rs1, Rs2, offset); \
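
The guarantee above enforces the B-type encoding limit: a conditional branch immediate is 13 bits, signed, and must be even, giving a reach of +/-4 KiB from the branch. As a sketch:

    #include <cstdint>

    static bool is_simm13(int64_t v) {
      return v >= -(1LL << 12) && v < (1LL << 12);
    }

    static bool branch_offset_encodable(int64_t offset) {
      return is_simm13(offset) && (offset % 2) == 0;
    }
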
@ -779,7 +779,7 @@ public:

#define INSN(NAME) \
void NAME(Register Rd, address dest) { \
assert_cond(dest != NULL); \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \
auipc(Rd, (int32_t)distance + 0x800); \
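
The "+ 0x800" in these wrappers compensates for the sign extension of the low 12-bit half that the instruction following auipc applies. A sketch of the hi/lo split (hypothetical helper; auipc() consumes the upper 20 bits of its argument):

    #include <cstdint>

    static void split_auipc_offset(int32_t distance, int32_t* hi, int32_t* lo) {
      *hi = (distance + 0x800) >> 12; // auipc immediate (upper 20 bits)
      *lo = distance - (*hi << 12);   // stays within [-0x800, 0x7ff]
    }
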
@ -836,7 +836,7 @@ public:

#define INSN(NAME) \
void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \
@ -896,7 +896,7 @@ public:

#define INSN(NAME) \
void NAME(Register Rs, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \
assert_cond(dest != nullptr); \
assert_different_registers(Rs, temp); \
int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \
@ -942,7 +942,7 @@ public:

#define INSN(NAME) \
void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
assert_cond(dest != NULL); \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
if (is_simm32(distance)) { \
auipc(temp, (int32_t)distance + 0x800); \
@ -1138,7 +1138,7 @@ public:
// - relocInfo::static_call_type
// - relocInfo::virtual_call_type
//
// Return: the call PC or NULL if CodeCache is full.
// Return: the call PC or null if CodeCache is full.
address trampoline_call(Address entry);
address ic_call(address entry, jint method_index = 0);

@ -1158,7 +1158,7 @@ public:

void cmpptr(Register src1, Address src2, Label& equal);

void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = NULL, Label* L_slow_path = NULL);
void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr);
void load_method_holder_cld(Register result, Register method);
void load_method_holder(Register holder, Register method);

@ -1332,7 +1332,7 @@ public:
void rt_call(address dest, Register tmp = t0);

void call(const address dest, Register temp = t0) {
assert_cond(dest != NULL);
assert_cond(dest != nullptr);
assert(temp != noreg, "expecting a register");
int32_t offset = 0;
mv(temp, dest, offset);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -93,12 +93,12 @@

static bool const_oop_prefer_decode() {
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
return CompressedOops::base() == NULL;
return CompressedOops::base() == nullptr;
}

static bool const_klass_prefer_decode() {
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
return CompressedKlassPointers::base() == NULL;
return CompressedKlassPointers::base() == nullptr;
}

// Is it better to copy float constants, or load them directly from

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -173,14 +173,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument.
__ ebreak(); // empty stubs make SG sick
return NULL;
return nullptr;
}

// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ ebreak();
return NULL;
return nullptr;
}

// x19_sender_sp: sender SP (must preserve; see prepare_to_jump_from_interpreted)

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -41,27 +41,27 @@
#endif

Register NativeInstruction::extract_rs1(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
}

Register NativeInstruction::extract_rs2(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
}

Register NativeInstruction::extract_rd(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
}

uint32_t NativeInstruction::extract_opcode(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return Assembler::extract(((unsigned*)instr)[0], 6, 0);
}

uint32_t NativeInstruction::extract_funct3(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return Assembler::extract(((unsigned*)instr)[0], 14, 12);
}

@ -128,7 +128,7 @@ address NativeCall::destination() const {
CodeBlob* cb = CodeCache::find_blob(addr);
assert(cb && cb->is_nmethod(), "sanity");
nmethod *nm = (nmethod *)cb;
if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
if (nm != nullptr && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
// Yes we do, so get the destination from the trampoline stub.
const address trampoline_stub_addr = destination;
destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
@ -157,7 +157,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {

// Patch the constant in the call's trampoline stub.
address trampoline_stub_addr = get_trampoline();
if (trampoline_stub_addr != NULL) {
if (trampoline_stub_addr != nullptr) {
assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
}
@ -166,7 +166,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
if (Assembler::reachable_from_branch_at(addr_call, dest)) {
set_destination(dest);
} else {
assert (trampoline_stub_addr != NULL, "we need a trampoline");
assert (trampoline_stub_addr != nullptr, "we need a trampoline");
set_destination(trampoline_stub_addr);
}

@ -177,18 +177,18 @@ address NativeCall::get_trampoline() {
address call_addr = addr_at(0);

CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != NULL, "Could not find the containing code blob");
assert(code != nullptr, "Could not find the containing code blob");

address jal_destination = MacroAssembler::pd_call_destination(call_addr);
if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
if (code != nullptr && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
return jal_destination;
}

if (code != NULL && code->is_nmethod()) {
if (code != nullptr && code->is_nmethod()) {
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}

return NULL;
return nullptr;
}

// Inserts a native call instruction at a given pc
@ -226,7 +226,7 @@ void NativeMovConstReg::set_data(intptr_t x) {
// instruction in oops section.
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
@ -329,7 +329,7 @@ bool NativeInstruction::is_safepoint_poll() {
}

bool NativeInstruction::is_lwu_to_zr(address instr) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0000011 &&
extract_funct3(instr) == 0b110 &&
extract_rd(instr) == zr); // zr
@ -342,7 +342,7 @@ bool NativeInstruction::is_sigill_not_entrant() {
}

void NativeIllegalInstruction::insert(address code_pos) {
assert_cond(code_pos != NULL);
assert_cond(code_pos != nullptr);
*(juint*)code_pos = 0xffffffff; // all bits ones is permanently reserved as an illegal instruction
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -68,21 +68,21 @@ class NativeInstruction {
bool is_call() const { return is_call_at(addr_at(0)); }
bool is_jump() const { return is_jump_at(addr_at(0)); }

static bool is_jal_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1101111; }
static bool is_jalr_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
static bool is_branch_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100011; }
static bool is_ld_at(address instr) { assert_cond(instr != NULL); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
static bool is_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000011; }
static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000111; }
static bool is_auipc_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010111; }
static bool is_jump_at(address instr) { assert_cond(instr != NULL); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
static bool is_addi_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != NULL); return is_addiw_at(instr) && extract_rd(instr) == zr; }
static bool is_lui_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0110111; }
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != NULL); return is_lui_at(instr) && extract_rd(instr) == zr; }
static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
static bool is_slli_shift_at(address instr, uint32_t shift) {
assert_cond(instr != NULL);
assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0010011 && // opcode field
extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift); // shamt field
@ -313,14 +313,14 @@ class NativeCall: public NativeInstruction {
};

inline NativeCall* nativeCall_at(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset);
DEBUG_ONLY(call->verify());
return call;
}

inline NativeCall* nativeCall_before(address return_address) {
assert_cond(return_address != NULL);
assert_cond(return_address != nullptr);
NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
DEBUG_ONLY(call->verify());
return call;
@ -357,7 +357,7 @@ class NativeMovConstReg: public NativeInstruction {
return addr_at(load_pc_relative_instruction_size);
}
guarantee(false, "Unknown instruction in NativeMovConstReg");
return NULL;
return nullptr;
}

intptr_t data() const;
@ -378,14 +378,14 @@ class NativeMovConstReg: public NativeInstruction {
};

inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
DEBUG_ONLY(test->verify());
return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
DEBUG_ONLY(test->verify());
return test;
@ -477,7 +477,7 @@ public:
};

inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
debug_only(jump->verify();)
return jump;
@ -508,7 +508,7 @@ class NativeCallTrampolineStub : public NativeInstruction {
data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr
};

address destination(nmethod *nm = NULL) const;
address destination(nmethod *nm = nullptr) const;
void set_destination(address new_destination);
ptrdiff_t destination_offset() const;
};
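
The declaration above fixes the stub shape: three 4-byte instructions through t0 followed by the 8-byte destination the ld reads, which is why data_offset is 3 * NativeInstruction::instruction_size. A sketch of that layout (illustrative struct, not the real class):

    #include <cstdint>

    struct TrampolineStubLayoutSketch {
      uint32_t insns[3];  // auipc t0, 0 ; ld t0, 12(t0) ; jr t0
      uint32_t target[2]; // patched 64-bit destination, read by the ld
    };
    static_assert(sizeof(TrampolineStubLayoutSketch) == 20,
                  "three instructions plus an 8-byte target");
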
@ -523,7 +523,7 @@ inline bool is_NativeCallTrampolineStub_at(address addr) {
// 1). check the instructions: auipc + ld + jalr
// 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
// 3). check if the offset in ld[31:20] equals the data_offset
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
const int instr_size = NativeInstruction::instruction_size;
if (NativeInstruction::is_auipc_at(addr) &&
NativeInstruction::is_ld_at(addr + instr_size) &&
@ -539,7 +539,7 @@ inline bool is_NativeCallTrampolineStub_at(address addr) {
}

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
return (NativeCallTrampolineStub*)addr;
}
@ -551,7 +551,7 @@ public:
};

inline NativeMembar *NativeMembar_at(address addr) {
assert_cond(addr != NULL);
assert_cond(addr != nullptr);
assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
return (NativeMembar*)addr;
}
@ -581,7 +581,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) {
return nop;
}
return NULL;
return nullptr;
}

inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
@ -603,7 +603,7 @@ class NativeDeoptInstruction: public NativeInstruction {
void verify();

static bool is_deopt_at(address instr) {
assert(instr != NULL, "");
assert(instr != nullptr, "");
uint32_t value = *(uint32_t *) instr;
// 0xc0201073 encodes CSRRW x0, instret, x0
return value == 0xc0201073;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -34,10 +34,10 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
VectorRegister::max_slots_per_register;
intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
address base_location = location(base_reg, nullptr);
if (base_location != NULL) {
if (base_location != nullptr) {
return base_location + offset_in_bytes;
} else {
return NULL;
return nullptr;
}
} else {
return location(base_reg->next(slot_idx), nullptr);
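
The slot arithmetic above addresses one 4-byte VMReg slot inside a wide (vector) register's save area. A sketch, assuming stack_slot_size stands in for VMRegImpl::stack_slot_size (4):

    #include <cstdint>

    static uint8_t* pd_location_sketch(uint8_t* base_location, int slot_idx,
                                       int stack_slot_size = 4) {
      if (base_location == nullptr) {
        return nullptr; // the base register was not saved in this frame
      }
      return base_location + (intptr_t)slot_idx * stack_slot_size;
    }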

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -32,7 +32,7 @@
private:
// This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format.
address pd_location(VMReg reg) const { return NULL; }
address pd_location(VMReg reg) const { return nullptr; }
address pd_location(VMReg base_reg, int slot_idx) const;

// no PD state to clear or copy:

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -41,7 +41,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
switch (type()) {
case relocInfo::oop_type: {
oop_Relocation *reloc = (oop_Relocation *)this;
// in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL
// in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() isn't null
if (NativeInstruction::is_load_pc_relative_at(addr())) {
address constptr = (address)code()->oop_addr_at(reloc->oop_index());
bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr);
@ -62,11 +62,11 @@ address Relocation::pd_call_destination(address orig_addr) {
assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != NULL) {
if (trampoline != nullptr) {
return nativeCallTrampolineStub_at(trampoline)->destination();
}
}
if (orig_addr != NULL) {
if (orig_addr != nullptr) {
// the extracted address from the instructions in address orig_addr
address new_addr = MacroAssembler::pd_call_destination(orig_addr);
// If call is branch to self, don't try to relocate it, just leave it
@ -83,7 +83,7 @@ void Relocation::pd_set_call_destination(address x) {
assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != NULL) {
if (trampoline != nullptr) {
nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
return;
}

@ -179,7 +179,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_

OopMapSet *oop_maps = new OopMapSet();
OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
assert_cond(oop_maps != NULL && oop_map != NULL);
assert_cond(oop_maps != nullptr && oop_map != nullptr);

int sp_offset_in_slots = 0;
int step_in_slots = 0;
@ -659,7 +659,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc();

// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL;
address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;

@ -696,7 +696,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on riscv");
assert(regs2 == nullptr, "not needed on riscv");

// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
@ -1310,14 +1310,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
(OopMapSet*)nullptr);
}
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
assert(native_func != nullptr, "must have function");

// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL);
assert_cond(oop_maps != nullptr);
intptr_t start = (intptr_t)__ pc();

// We have received a description of where all the java arg are located
@ -1331,7 +1331,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
BasicType* in_elem_bt = nullptr;

int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
@ -1345,7 +1345,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

// Now figure out where the args must be stored and how much stack space
// they require.
int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);

// Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers
@ -1469,8 +1469,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sub(sp, sp, stack_size - 2 * wordSize);

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
assert_cond(bs != NULL);
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
assert_cond(bs != nullptr);
bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);

// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
@ -1507,7 +1507,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// caller.
//
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
assert_cond(map != NULL);
assert_cond(map != nullptr);

int float_args = 0;
int int_args = 0;
@ -1680,7 +1680,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sd(swap_reg, Address(lock_reg, mark_word_offset));

// src -> dest if dest == x10 else x10 <- dest
__ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL);
__ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);

// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 3) == 0, and
@ -2026,7 +2026,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
assert(nm != NULL, "create native nmethod fail!");
assert(nm != nullptr, "create native nmethod fail!");
return nm;
}

@ -2057,9 +2057,9 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2048 + pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1;
OopMap* map = NULL;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
assert_cond(masm != NULL && oop_maps != NULL);
assert_cond(masm != nullptr && oop_maps != nullptr);
RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);

// -------------
@ -2420,7 +2420,7 @@ void SharedRuntime::generate_deopt_blob() {
masm->flush();

_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
assert(_deopt_blob != NULL, "create deoptimization blob fail!");
assert(_deopt_blob != nullptr, "create deoptimization blob fail!");
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
if (EnableJVMCI) {
@ -2450,7 +2450,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Setup code generation tools
CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

@ -2497,7 +2497,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Set an oopmap for the call site
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
assert_cond(oop_maps != NULL && map != NULL);
assert_cond(oop_maps != nullptr && map != nullptr);

// location of fp is known implicitly by the frame sender code

@ -2649,16 +2649,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
ResourceMark rm;
OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL);
OopMap* map = NULL;
assert_cond(oop_maps != nullptr);
OopMap* map = nullptr;

// Allocate space for the code. Setup code generation tools.
CodeBuffer buffer("handler_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

address start = __ pc();
address call_pc = NULL;
address call_pc = nullptr;
int frame_size_in_words = -1;
bool cause_return = (poll_type == POLL_AT_RETURN);
RegisterSaver reg_saver(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
@ -2773,21 +2773,21 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");

// allocate space for the code
ResourceMark rm;

CodeBuffer buffer(name, 1000, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

int frame_size_in_words = -1;
RegisterSaver reg_saver(false /* save_vectors */);

OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL);
OopMap* map = NULL;
assert_cond(oop_maps != nullptr);
OopMap* map = nullptr;

int start = __ offset();

@ -2896,7 +2896,7 @@ void OptoRuntime::generate_exception_blob() {
// Setup code generation tools
CodeBuffer buffer("exception_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

// TODO check various assumptions made here
//
@ -2952,7 +2952,7 @@ void OptoRuntime::generate_exception_blob() {
// exception.

OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL);
assert_cond(oop_maps != nullptr);

oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

@ -490,7 +490,7 @@ class StubGenerator: public StubCodeGenerator {
__ sw(t0, Address(xthread, Thread::exception_line_offset()));

// complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL,
assert(StubRoutines::_call_stub_return_address != nullptr,
"_call_stub_return_address must have been generated before");
__ j(StubRoutines::_call_stub_return_address);

@ -612,7 +612,7 @@ class StubGenerator: public StubCodeGenerator {

// object is in x10
// make sure object is 'reasonable'
__ beqz(x10, exit); // if obj is NULL it is OK
__ beqz(x10, exit); // if obj is null it is OK

BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, x10, c_rarg2, c_rarg3, error);
@ -726,7 +726,7 @@ class StubGenerator: public StubCodeGenerator {
assert_different_registers(s, d, count, t0);

Label again, drain;
const char* stub_name = NULL;
const char* stub_name = nullptr;
if (direction == copy_forwards) {
stub_name = "forward_copy_longs";
} else {
@ -1118,7 +1118,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter();

if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1186,7 +1186,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter();

if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1469,8 +1469,8 @@ class StubGenerator: public StubCodeGenerator {

Label L_miss;

__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr, super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr);

// Fall through on failure!
__ BIND(L_miss);
@ -1530,7 +1530,7 @@ class StubGenerator: public StubCodeGenerator {
__ enter(); // required for proper stackwalking of RuntimeStub frame

// Caller of this entry point must set up the argument registers.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
BLOCK_COMMENT("Entry:");
}
@ -1675,8 +1675,8 @@ class StubGenerator: public StubCodeGenerator {
address short_copy_entry,
address int_copy_entry,
address long_copy_entry) {
assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL &&
int_copy_entry != NULL && long_copy_entry != NULL);
assert_cond(byte_copy_entry != nullptr && short_copy_entry != nullptr &&
int_copy_entry != nullptr && long_copy_entry != nullptr);
Label L_long_aligned, L_int_aligned, L_short_aligned;
const Register s = c_rarg0, d = c_rarg1, count = c_rarg2;

@ -1730,9 +1730,9 @@ class StubGenerator: public StubCodeGenerator {
address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address oop_copy_entry,
address long_copy_entry, address checkcast_copy_entry) {
assert_cond(byte_copy_entry != NULL && short_copy_entry != NULL &&
int_copy_entry != NULL && oop_copy_entry != NULL &&
long_copy_entry != NULL && checkcast_copy_entry != NULL);
assert_cond(byte_copy_entry != nullptr && short_copy_entry != nullptr &&
int_copy_entry != nullptr && oop_copy_entry != nullptr &&
long_copy_entry != nullptr && checkcast_copy_entry != nullptr);
Label L_failed, L_failed_0, L_objArray;
Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

@ -1765,13 +1765,13 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
//

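A hypothetical C++ mirror of checks (1)-(4), (7) and (8) from the list above; the stub performs the same tests on registers, branching to L_failed to return -1 (the klass and array-type checks (5)-(6) need runtime metadata):

    #include <cstdint>

    static int arraycopy_args_ok(const void* src, int32_t src_pos,
                                 const void* dst, int32_t dst_pos,
                                 int32_t length, int32_t src_len,
                                 int32_t dst_len) {
      if (src == nullptr || dst == nullptr) return -1;          // (1)
      if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;  // (2)-(4)
      if ((int64_t)src_pos + length > src_len) return -1;       // (7)
      if ((int64_t)dst_pos + length > dst_len) return -1;       // (8)
      return 0;
    }
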
// if [src == NULL] then return -1
|
||||
// if src is null then return -1
|
||||
__ beqz(src, L_failed);
|
||||
|
||||
// if [src_pos < 0] then return -1
|
||||
@ -1779,7 +1779,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ test_bit(t0, src_pos, 31);
|
||||
__ bnez(t0, L_failed);
|
||||
|
||||
// if [dst == NULL] then return -1
|
||||
// if dst is null then return -1
|
||||
__ beqz(dst, L_failed);
|
||||
|
||||
// if [dst_pos < 0] then return -1
|
||||
@ -1803,7 +1803,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
{
|
||||
BLOCK_COMMENT("assert klasses not null {");
|
||||
Label L1, L2;
|
||||
__ bnez(scratch_src_klass, L2); // it is broken if klass is NULL
|
||||
__ bnez(scratch_src_klass, L2); // it is broken if klass is null
|
||||
__ bind(L1);
|
||||
__ stop("broken null klass");
|
||||
__ bind(L2);
|
||||
@ -1833,7 +1833,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ load_klass(t1, dst);
|
||||
__ bne(t1, scratch_src_klass, L_failed);
|
||||
|
||||
// if [src->is_Array() != NULL] then return -1
|
||||
// if src->is_Array() isn't null then return -1
|
||||
// i.e. (lh >= 0)
|
||||
__ test_bit(t0, lh, 31);
|
||||
__ beqz(t0, L_failed);
|
||||
@ -2176,13 +2176,13 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
void generate_arraycopy_stubs() {
|
||||
address entry = NULL;
|
||||
address entry_jbyte_arraycopy = NULL;
|
||||
address entry_jshort_arraycopy = NULL;
|
||||
address entry_jint_arraycopy = NULL;
|
||||
address entry_oop_arraycopy = NULL;
|
||||
address entry_jlong_arraycopy = NULL;
|
||||
address entry_checkcast_arraycopy = NULL;
|
||||
address entry = nullptr;
|
||||
address entry_jbyte_arraycopy = nullptr;
|
||||
address entry_jshort_arraycopy = nullptr;
|
||||
address entry_jint_arraycopy = nullptr;
|
||||
address entry_oop_arraycopy = nullptr;
|
||||
address entry_jlong_arraycopy = nullptr;
|
||||
address entry_checkcast_arraycopy = nullptr;
|
||||
|
||||
generate_copy_longs(copy_f, c_rarg0, c_rarg1, t1, copy_forwards);
|
||||
generate_copy_longs(copy_b, c_rarg0, c_rarg1, t1, copy_backwards);
@ -2198,7 +2198,7 @@ class StubGenerator: public StubCodeGenerator {
"jbyte_arraycopy");
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
"arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL,
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr,
"arrayof_jbyte_arraycopy");

//*** jshort
@ -2210,7 +2210,7 @@ class StubGenerator: public StubCodeGenerator {
"jshort_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
"arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL,
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr,
"arrayof_jshort_arraycopy");

//*** jint
@ -2253,7 +2253,7 @@ class StubGenerator: public StubCodeGenerator {
= generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_arrayof_oop_arraycopy_uninit
= generate_conjoint_oop_copy(aligned, entry, NULL, "arrayof_oop_arraycopy_uninit",
= generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true);
}

@ -2263,7 +2263,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit;

StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true);


@ -3722,7 +3722,7 @@ class StubGenerator: public StubCodeGenerator {
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
// n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
assert_cond(runtime_entry != NULL);
assert_cond(runtime_entry != nullptr);
enum layout {
fp_off = 0,
fp_off2,
@ -3737,7 +3737,7 @@ class StubGenerator: public StubCodeGenerator {
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
assert_cond(oop_maps != NULL && masm != NULL);
assert_cond(oop_maps != nullptr && masm != nullptr);

address start = __ pc();

@ -3773,7 +3773,7 @@ class StubGenerator: public StubCodeGenerator {

// Generate oop map
OopMap* map = new OopMap(framesize, 0);
assert_cond(map != NULL);
assert_cond(map != nullptr);

oop_maps->add_gc_map(the_pc - start, map);

@ -3798,7 +3798,7 @@ class StubGenerator: public StubCodeGenerator {
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
assert(stub != NULL, "create runtime stub fail!");
assert(stub != nullptr, "create runtime stub fail!");
return stub->entry_point();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -34,25 +34,25 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.

address StubRoutines::riscv::_get_previous_sp_entry = NULL;
address StubRoutines::riscv::_get_previous_sp_entry = nullptr;

address StubRoutines::riscv::_f2i_fixup = NULL;
address StubRoutines::riscv::_f2l_fixup = NULL;
address StubRoutines::riscv::_d2i_fixup = NULL;
address StubRoutines::riscv::_d2l_fixup = NULL;
address StubRoutines::riscv::_float_sign_mask = NULL;
address StubRoutines::riscv::_float_sign_flip = NULL;
address StubRoutines::riscv::_double_sign_mask = NULL;
address StubRoutines::riscv::_double_sign_flip = NULL;
address StubRoutines::riscv::_zero_blocks = NULL;
address StubRoutines::riscv::_compare_long_string_LL = NULL;
address StubRoutines::riscv::_compare_long_string_UU = NULL;
address StubRoutines::riscv::_compare_long_string_LU = NULL;
address StubRoutines::riscv::_compare_long_string_UL = NULL;
address StubRoutines::riscv::_string_indexof_linear_ll = NULL;
address StubRoutines::riscv::_string_indexof_linear_uu = NULL;
address StubRoutines::riscv::_string_indexof_linear_ul = NULL;
address StubRoutines::riscv::_large_byte_array_inflate = NULL;
address StubRoutines::riscv::_method_entry_barrier = NULL;
address StubRoutines::riscv::_f2i_fixup = nullptr;
address StubRoutines::riscv::_f2l_fixup = nullptr;
address StubRoutines::riscv::_d2i_fixup = nullptr;
address StubRoutines::riscv::_d2l_fixup = nullptr;
address StubRoutines::riscv::_float_sign_mask = nullptr;
address StubRoutines::riscv::_float_sign_flip = nullptr;
address StubRoutines::riscv::_double_sign_mask = nullptr;
address StubRoutines::riscv::_double_sign_flip = nullptr;
address StubRoutines::riscv::_zero_blocks = nullptr;
address StubRoutines::riscv::_compare_long_string_LL = nullptr;
address StubRoutines::riscv::_compare_long_string_UU = nullptr;
address StubRoutines::riscv::_compare_long_string_LU = nullptr;
address StubRoutines::riscv::_compare_long_string_UL = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ll = nullptr;
address StubRoutines::riscv::_string_indexof_linear_uu = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ul = nullptr;
address StubRoutines::riscv::_large_byte_array_inflate = nullptr;
address StubRoutines::riscv::_method_entry_barrier = nullptr;

bool StubRoutines::riscv::_completed = false;
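The fields above all follow one lifecycle: each platform stub entry starts as nullptr and is assigned once the stub generator has produced code for it, so a nullptr read means the stub is absent. A hedged sketch of that lifecycle with invented names (_zero_blocks here is a local stand-in, not the real field):

#include <cassert>

using address = unsigned char*;  // HotSpot-style alias

static address _zero_blocks = nullptr;  // "not generated yet"

static address generate_zero_blocks() {
  static unsigned char code[16];  // placeholder for emitted code
  return code;
}

static void generate_all() {
  _zero_blocks = generate_zero_blocks();
}

static address zero_blocks() {
  // Mirrors the assert_cond(... != nullptr) style used in this patch.
  assert(_zero_blocks != nullptr && "stub not yet generated");
  return _zero_blocks;
}

int main() {
  generate_all();
  return zero_blocks() != nullptr ? 0 : 1;
}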

@ -101,7 +101,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args <- esp
// garbage
// expression stack bottom
// bcp (NULL)
// bcp (null)
// ...

// Restore ra
@ -160,8 +160,8 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// [ arg ]
// retaddr in ra

address fn = NULL;
address entry_point = NULL;
address fn = nullptr;
address entry_point = nullptr;
Register continuation = ra;
switch (kind) {
case Interpreter::java_lang_math_abs:
@ -182,7 +182,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dsin() == NULL) {
if (StubRoutines::dsin() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
@ -195,7 +195,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dcos() == NULL) {
if (StubRoutines::dcos() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
@ -208,7 +208,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dtan() == NULL) {
if (StubRoutines::dtan() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
@ -221,7 +221,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dlog() == NULL) {
if (StubRoutines::dlog() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
@ -234,7 +234,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dlog10() == NULL) {
if (StubRoutines::dlog10() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
@ -247,7 +247,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ mv(sp, x19_sender_sp);
__ mv(x9, ra);
continuation = x9; // The first callee-saved register
if (StubRoutines::dexp() == NULL) {
if (StubRoutines::dexp() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
@ -261,7 +261,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ fld(f10, Address(esp, 2 * Interpreter::stackElementSize));
__ fld(f11, Address(esp));
__ mv(sp, x19_sender_sp);
if (StubRoutines::dpow() == NULL) {
if (StubRoutines::dpow() == nullptr) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
@ -291,7 +291,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
default:
;
}
if (entry_point != NULL) {
if (entry_point != nullptr) {
__ jr(continuation);
}
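Every case in the switch above prefers a generated intrinsic stub and falls back to the shared runtime when the stub was never generated. A sketch of that selection logic, assuming a placeholder dsin_stub; the real test is the StubRoutines::dsin() == nullptr check shown above:

#include <cmath>
#include <cstdio>

using math_fn = double (*)(double);

// Stand-in for SharedRuntime::dsin, the C fallback.
static double shared_dsin(double x) { return std::sin(x); }

// Placeholder: nullptr means "no intrinsic stub was generated".
static math_fn dsin_stub = nullptr;

static math_fn select_dsin() {
  if (dsin_stub == nullptr) {
    return shared_dsin;  // shared-runtime fallback
  }
  return dsin_stub;      // the generated stub wins when present
}

int main() {
  std::printf("sin(0) = %f\n", select_dsin()(0.0));
  return 0;
}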

@ -308,7 +308,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {

// abstract method entry

// pop return address, reset last_sp to NULL
// pop return address, reset last_sp to null
__ empty_expression_stack();
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
@ -388,7 +388,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {

address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();
if (pass_oop) {
// object is at TOS
@ -405,9 +405,9 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
create_klass_exception),
c_rarg1, c_rarg2);
} else {
// kind of lame ExternalAddress can't take NULL because
// kind of lame ExternalAddress can't take null because
// external_word_Relocation will assert.
if (message != NULL) {
if (message != nullptr) {
__ la(c_rarg2, Address((address)message));
} else {
__ mv(c_rarg2, NULL_WORD);
@ -426,7 +426,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,

// Restore stack bottom in case i2c adjusted stack
__ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call
// and null it as marker that esp is now tos until next java call
__ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
__ restore_bcp();
__ restore_locals();
@ -483,7 +483,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,

// Restore expression stack pointer
__ ld(esp, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// NULL last_sp until next java call
// null last_sp until next java call
__ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

// handle exceptions
@ -497,7 +497,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ bind(L);
}

if (continuation == NULL) {
if (continuation == nullptr) {
__ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
@ -522,7 +522,7 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
address runtime_entry) {
assert_cond(runtime_entry != NULL);
assert_cond(runtime_entry != nullptr);
address entry = __ pc();
__ push(state);
__ push_cont_fastpath(xthread);
@ -652,7 +652,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

// all done with frame size check
@ -694,7 +694,7 @@ void TemplateInterpreterGenerator::lock_method() {
{
Label L;
__ bnez(x10, L);
__ stop("synchronization object is NULL");
__ stop("synchronization object is null");
__ bind(L);
}
#endif // ASSERT
@ -840,7 +840,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {

Label slow_path;
const Register local_0 = c_rarg0;
// Check if local 0 != NULL
// Check if local 0 isn't null
// If the receiver is null then it is OK to jump to the slow path.
__ ld(local_0, Address(esp, 0));
__ beqz(local_0, slow_path);
@ -1630,7 +1630,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ bne(t1, t0, L_done);

// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument,or NULL.
// Detect such a case in the InterpreterRuntime function and return the member name argument,or null.

__ ld(c_rarg0, Address(xlocals, 0));
__ call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),c_rarg0, xmethod, xbcp);
@ -1726,7 +1726,7 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
address& fep,
address& dep,
address& vep) {
assert(t != NULL && t->is_valid() && t->tos_in() == vtos, "illegal template");
assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template");
Label L;
aep = __ pc(); __ push_ptr(); __ j(L);
fep = __ pc(); __ push_f(); __ j(L);
@ -1795,7 +1795,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The run-time runtime saves the right registers, depending on
// the tosca in-state for the given template.

assert(Interpreter::trace_code(t->tos_in()) != NULL, "entry must have been generated");
assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
__ jal(Interpreter::trace_code(t->tos_in()));
__ reinit_heapbase();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -123,8 +123,8 @@ static inline Address at_tos_p5() {
}

// Miscellaneous helper routines
// Store an oop (or NULL) at the Address described by obj.
// If val == noreg this means store a NULL
// Store an oop (or null) at the Address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
Address dst,
Register val,
@ -391,7 +391,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ ld(tmp, Address(rarg, offset));
__ resolve_oop_handle(tmp, x15, t1);
__ bne(result, tmp, notNull);
__ mv(result, zr); // NULL object reference
__ mv(result, zr); // null object reference
__ bind(notNull);
}

@ -1050,7 +1050,7 @@ void TemplateTable::aastore() {

Address element_address(x14, 0);

// do array store check - check for NULL value first
// do array store check - check for null value first
__ beqz(x10, is_null);

// Move subklass into x11
@ -1078,11 +1078,11 @@ void TemplateTable::aastore() {
do_oop_store(_masm, element_address, x10, IS_ARRAY);
__ j(done);

// Have a NULL in x10, x13=array, x12=index. Store NULL at ary[idx]
// Have a null in x10, x13=array, x12=index. Store null at ary[idx]
__ bind(is_null);
__ profile_null_seen(x12);

// Store a NULL
// Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY);

// Pop stack arguments
@ -1717,7 +1717,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
x12);
__ load_unsigned_byte(x11, Address(xbcp, 0)); // restore target bytecode

// x10: osr nmethod (osr ok) or NULL (osr not possible)
// x10: osr nmethod (osr ok) or null (osr not possible)
// w11: target bytecode
// x12: temporary
__ beqz(x10, dispatch); // test result -- no osr if null
@ -2181,7 +2181,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache);
__ load_method_holder(temp, temp);
__ clinit_barrier(temp, t0, NULL, &clinit_barrier_slow);
__ clinit_barrier(temp, t0, nullptr, &clinit_barrier_slow);
}
}

@ -2344,12 +2344,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
__ la(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));

if (is_static) {
__ mv(c_rarg1, zr); // NULL object reference
__ mv(c_rarg1, zr); // null object reference
} else {
__ ld(c_rarg1, at_tos()); // get object pointer without popping it
__ verify_oop(c_rarg1);
}
// c_rarg1: object pointer or NULL
// c_rarg1: object pointer or null
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -2587,7 +2587,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
// object (tos)
__ mv(c_rarg3, esp);
// c_rarg1: object pointer set up above (NULL if static)
// c_rarg1: object pointer set up above (null if static)
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
__ call_VM(noreg,
@ -3604,7 +3604,7 @@ void TemplateTable::checkcast() {
__ bind(ok_is_subtype);
__ mv(x10, x13); // Restore object in x13

// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ j(done);
__ bind(is_null);
@ -3659,7 +3659,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype);
__ mv(x10, 1);

// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ j(done);
__ bind(is_null);
@ -3668,8 +3668,8 @@ void TemplateTable::instanceof() {
__ bind(is_null); // same as 'done'
}
__ bind(done);
// x10 = 0: obj == NULL or obj is not an instanceof the specified klass
// x10 = 1: obj != NULL and obj is an instanceof the specified klass
// x10 = 0: obj is null or obj is not an instanceof the specified klass
// x10 = 1: obj isn't null and obj is an instanceof the specified klass
}
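The result convention above matches Java semantics, where instanceof on a null reference is always false. The same logic in plain C++ (a sketch; is_subtype_of is a hypothetical stand-in for the subtype check the template performs in assembly):

struct Klass { const Klass* super; };
struct Oop   { const Klass* klass; };

// Hypothetical subtype walk standing in for the assembly check.
static bool is_subtype_of(const Klass* sub, const Klass* super) {
  for (const Klass* k = sub; k != nullptr; k = k->super) {
    if (k == super) return true;
  }
  return false;
}

static int instanceof_result(const Oop* obj, const Klass* k) {
  if (obj == nullptr) return 0;                 // x10 = 0: obj is null
  return is_subtype_of(obj->klass, k) ? 1 : 0;  // x10 = 1: an instance
}

int main() {
  Klass object{nullptr}, string{&object};
  Oop s{&string};
  return (instanceof_result(nullptr, &object) == 0 &&
          instanceof_result(&s, &object) == 1) ? 0 : 1;
}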

//-----------------------------------------------------------------------------
@ -3730,7 +3730,7 @@ void TemplateTable::athrow() {
void TemplateTable::monitorenter() {
transition(atos, vtos);

// check for NULL object
// check for null object
__ null_check(x10);

const Address monitor_block_top(
@ -3742,7 +3742,7 @@ void TemplateTable::monitorenter() {
Label allocated;

// initialize entry pointer
__ mv(c_rarg1, zr); // points to free slot or NULL
__ mv(c_rarg1, zr); // points to free slot or null

// find a free slot in the monitor block (result in c_rarg1)
{
@ -3828,7 +3828,7 @@ void TemplateTable::monitorenter() {
void TemplateTable::monitorexit() {
transition(atos, vtos);

// check for NULL object
// check for null object
__ null_check(x10);

const Address monitor_block_top(

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -210,7 +210,7 @@ void VM_Version::initialize() {

char buf[512];
buf[0] = '\0';
if (_uarch != NULL && strcmp(_uarch, "") != 0) snprintf(buf, sizeof(buf), "%s,", _uarch);
if (_uarch != nullptr && strcmp(_uarch, "") != 0) snprintf(buf, sizeof(buf), "%s,", _uarch);
strcat(buf, "rv64");
#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) if (_features & CPU_##id) strcat(buf, name);
CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
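CPU_FEATURE_FLAGS above is an X-macro: it applies ADD_FEATURE_IF_SUPPORTED once per known feature, appending each supported extension's name to buf. A self-contained sketch of the same technique, with an invented feature list:

#include <cstdio>
#include <cstring>

// Invented feature list for illustration; the real CPU_FEATURE_FLAGS
// in vm_version_riscv enumerates the actual RISC-V extensions.
#define MY_FEATURE_FLAGS(decl) \
  decl(I, "i", 0)              \
  decl(M, "m", 1)              \
  decl(A, "a", 2)

enum { CPU_I = 1 << 0, CPU_M = 1 << 1, CPU_A = 1 << 2 };
static unsigned _features = CPU_I | CPU_A;

int main() {
  char buf[64];
  buf[0] = '\0';
  std::strcat(buf, "rv64");
  // Expands to one strcat per supported feature, as in the hunk above.
#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) \
  if (_features & CPU_##id) std::strcat(buf, name);
  MY_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
#undef ADD_FEATURE_IF_SUPPORTED
  std::puts(buf);  // prints "rv64ia"
  return 0;
}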

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -52,22 +52,22 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}

// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
address start_pc = NULL;
address start_pc = nullptr;
int slop_bytes = 0;
int slop_delta = 0;

ResourceMark rm;
CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
@ -122,7 +122,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ beqz(xmethod, L);
__ ld(t0, Address(xmethod, Method::from_compiled_offset()));
__ bnez(t0, L);
__ stop("Vtable entry is NULL");
__ stop("Vtable entry is null");
__ bind(L);
}
#endif // PRODUCT
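The null checks above exist because VtableStub's operator new carves the stub out of the code cache and can yield nullptr when the cache is full. A simplified, self-contained sketch of that shape; the arena here stands in for the code cache, and the types are invented:

#include <cstddef>

// Fixed arena standing in for the code cache, which can run out.
static unsigned char arena[256];
static std::size_t arena_used = 0;

struct Stub {
  bool is_vtable;
  int  index;
  Stub(bool v, int i) : is_vtable(v), index(i) {}

  // noexcept placement allocator: returning nullptr skips construction,
  // which is what makes the "if (s == nullptr)" test above meaningful.
  // The constructor cannot throw, so no placement delete is needed.
  static void* operator new(std::size_t size, std::size_t code_len) noexcept {
    if (arena_used + size + code_len > sizeof(arena)) return nullptr;
    void* p = arena + arena_used;
    arena_used += size + code_len;
    return p;
  }
};

static Stub* create_stub(int index, std::size_t code_len) {
  Stub* s = new (code_len) Stub(true, index);
  if (s == nullptr) {  // code cache full: bail out, as above
    return nullptr;
  }
  return s;
}

int main() {
  Stub* a = create_stub(1, 100);
  Stub* b = create_stub(2, 200);  // exhausts the arena: expect nullptr
  return (a != nullptr && b == nullptr) ? 0 : 1;
}

The same allocate-then-check pattern repeats in create_itable_stub below.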
@ -144,21 +144,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
address start_pc = NULL;
address start_pc = nullptr;
int slop_bytes = 0;
int slop_delta = 0;

ResourceMark rm;
CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
assert_cond(masm != NULL);
assert_cond(masm != nullptr);

#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
@ -244,7 +244,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// We force resolving of the call site by jumping to the "handle
// wrong method" stub, and so let the interpreter runtime do all the
// dirty work.
assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));


masm->flush();