8301498: Replace NULL with nullptr in cpu/x86

Reviewed-by: dholmes, kvn
Johan Sjölen 2023-03-22 14:18:40 +00:00
parent ddf1e34c1a
commit 4154a980ca
54 changed files with 656 additions and 656 deletions
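
Background (not part of the patch itself): NULL is a macro that expands to an implementation-defined null pointer constant, historically 0 or 0L, so it is really an integer literal that merely converts to pointer types. nullptr, added in C++11, has its own type std::nullptr_t and converts only to pointers, which keeps overload resolution and comparisons such as inst_mark() != nullptr unambiguous. A minimal standalone C++ sketch of the difference (hypothetical report() overloads, not taken from HotSpot):

#include <cstdio>

static void report(int)   { std::puts("matched report(int)"); }
static void report(char*) { std::puts("matched report(char*)"); }

int main() {
  // The literal 0 is an int first, so it picks the integer overload even
  // though it is also a valid null pointer constant; NULL usually expands
  // to 0 or 0L and behaves the same way (or is rejected as ambiguous,
  // depending on how the implementation defines it).
  report(0);
  // nullptr has type std::nullptr_t, so it can only bind to the pointer
  // overload; there is no silent fallback to an integer parameter.
  report(nullptr);
  return 0;
}

In the hunks below the same substitution is also applied to comments and to assert message strings ("NULL" becomes "null"), not only to the pointer constants themselves.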


@ -214,7 +214,7 @@ void Assembler::init_attributes(void) {
_legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
_legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
NOT_LP64(_is_managed = false;)
_attributes = NULL;
_attributes = nullptr;
}
@ -264,7 +264,7 @@ void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
assert(imm_operand == 0, "default format must be immediate in this file");
assert(inst_mark() != NULL, "must be inside InstructionMark");
assert(inst_mark() != nullptr, "must be inside InstructionMark");
if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
check_relocation(rspec, format);
@ -684,7 +684,7 @@ void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
// disp was created by converting the target address minus the pc
// at the start of the instruction. That needs more correction here.
// intptr_t disp = target - next_ip;
assert(inst_mark() != NULL, "must be inside InstructionMark");
assert(inst_mark() != nullptr, "must be inside InstructionMark");
address next_ip = pc() + sizeof(int32_t) + post_addr_length;
int64_t adjusted = disp;
// Do rip-rel adjustment for 64bit
@ -1234,7 +1234,7 @@ address Assembler::locate_next_instruction(address inst) {
#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
address inst = inst_mark();
assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
assert(inst != nullptr && inst < pc(), "must point to beginning of instruction");
address opnd;
Relocation* r = rspec.reloc();
@ -1690,8 +1690,8 @@ void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
InstructionMark im(this);
emit_int8((unsigned char)0xE8);
intptr_t disp = entry - (pc() + sizeof(int32_t));
// Entry is NULL in case of a scratch emit.
assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
// Entry is null in case of a scratch emit.
assert(entry == nullptr || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
// Technically, should use call32_operand, but this format is
// implied by the fact that we're emitting a call instruction.
@ -2404,7 +2404,7 @@ void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
assert((0 <= cc) && (cc < 16), "illegal cc");
if (L.is_bound()) {
address dst = target(L);
assert(dst != NULL, "jcc most probably wrong");
assert(dst != nullptr, "jcc most probably wrong");
const int short_size = 2;
const int long_size = 6;
@ -2462,7 +2462,7 @@ void Assembler::jmp(Address adr) {
void Assembler::jmp(Label& L, bool maybe_short) {
if (L.is_bound()) {
address entry = target(L);
assert(entry != NULL, "jmp most probably wrong");
assert(entry != nullptr, "jmp most probably wrong");
InstructionMark im(this);
const int short_size = 2;
const int long_size = 5;
@ -2493,7 +2493,7 @@ void Assembler::jmp(Register entry) {
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
InstructionMark im(this);
emit_int8((unsigned char)0xE9);
assert(dest != NULL, "must have a target");
assert(dest != nullptr, "must have a target");
intptr_t disp = dest - (pc() + sizeof(int32_t));
assert(is_simm32(disp), "must be 32bit offset (jmp)");
emit_data(disp, rspec, call32_operand);
@ -2503,7 +2503,7 @@ void Assembler::jmpb_0(Label& L, const char* file, int line) {
if (L.is_bound()) {
const int short_size = 2;
address entry = target(L);
assert(entry != NULL, "jmp most probably wrong");
assert(entry != nullptr, "jmp most probably wrong");
#ifdef ASSERT
intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
intptr_t delta = short_branch_delta();
@ -6341,7 +6341,7 @@ void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
relocate(rtype);
if (abort.is_bound()) {
address entry = target(abort);
assert(entry != NULL, "abort entry NULL");
assert(entry != nullptr, "abort entry null");
intptr_t offset = entry - pc();
emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
emit_int32(offset - 6); // 2 opcode + 4 address
@ -12495,7 +12495,7 @@ void Assembler::emit_data64(jlong data,
int format) {
assert(imm_operand == 0, "default format must be immediate in this file");
assert(imm_operand == format, "must be immediate");
assert(inst_mark() != NULL, "must be inside InstructionMark");
assert(inst_mark() != nullptr, "must be inside InstructionMark");
// Do not use AbstractAssembler::relocate, which is not intended for
// embedded words. Instead, relocate to the enclosing instruction.
code_section()->relocate(inst_mark(), rspec, format);
@ -13521,13 +13521,13 @@ void Assembler::popq(Register dst) {
// and copying it out on subsequent invocations can thus be beneficial
static bool precomputed = false;
static u_char* popa_code = NULL;
static u_char* popa_code = nullptr;
static int popa_len = 0;
static u_char* pusha_code = NULL;
static u_char* pusha_code = nullptr;
static int pusha_len = 0;
static u_char* vzup_code = NULL;
static u_char* vzup_code = nullptr;
static int vzup_len = 0;
void Assembler::precompute_instructions() {
@ -13574,7 +13574,7 @@ void Assembler::precompute_instructions() {
}
static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
assert(src != NULL, "code to copy must have been pre-computed");
assert(src != nullptr, "code to copy must have been pre-computed");
assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
address end = code_section->end();
memcpy(end, src, src_len);


@ -355,7 +355,7 @@ class AddressLiteral {
// creation
AddressLiteral()
: _is_lval(false),
_target(NULL)
_target(nullptr)
{}
public:
@ -919,7 +919,7 @@ private:
void init_attributes(void);
void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
void clear_attributes(void) { _attributes = NULL; }
void clear_attributes(void) { _attributes = nullptr; }
void set_managed(void) { NOT_LP64(_is_managed = true;) }
void clear_managed(void) { NOT_LP64(_is_managed = false;) }
@ -2884,13 +2884,13 @@ public:
_input_size_in_bits(Assembler::EVEX_NObit),
_evex_encoding(0),
_embedded_opmask_register_specifier(0), // hard code k0
_current_assembler(NULL) { }
_current_assembler(nullptr) { }
~InstructionAttr() {
if (_current_assembler != NULL) {
if (_current_assembler != nullptr) {
_current_assembler->clear_attributes();
}
_current_assembler = NULL;
_current_assembler = nullptr;
}
private:


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ class Bytes: AllStatic {
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
template <typename T>
static inline T get_native(const void* p) {
assert(p != NULL, "null pointer");
assert(p != nullptr, "null pointer");
T x;
@ -50,7 +50,7 @@ class Bytes: AllStatic {
template <typename T>
static inline void put_native(void* p, T x) {
assert(p != NULL, "null pointer");
assert(p != nullptr, "null pointer");
if (is_aligned(p, sizeof(T))) {
*(T*)p = x;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
__ pop(tmp2);
__ pop(tmp1);
#endif /* _LP64 */
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
@ -318,7 +318,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
#ifdef ASSERT
address start = __ pc();
#endif
Metadata* o = NULL;
Metadata* o = nullptr;
__ mov_metadata(_obj, o);
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {
@ -333,7 +333,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
#ifdef ASSERT
address start = __ pc();
#endif
jobject o = NULL;
jobject o = nullptr;
__ movoop(_obj, o);
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {
@ -404,7 +404,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
address target = nullptr;
relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;


@ -144,7 +144,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() {
address LIR_Assembler::float_constant(float f) {
address const_addr = __ float_constant(f);
if (const_addr == NULL) {
if (const_addr == nullptr) {
bailout("const section overflow");
return __ code()->consts()->start();
} else {
@ -155,7 +155,7 @@ address LIR_Assembler::float_constant(float f) {
address LIR_Assembler::double_constant(double d) {
address const_addr = __ double_constant(d);
if (const_addr == NULL) {
if (const_addr == nullptr) {
bailout("const section overflow");
return __ code()->consts()->start();
} else {
@ -321,7 +321,7 @@ void LIR_Assembler::osr_entry() {
Label L;
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ stop("locked object is null");
__ bind(L);
}
#endif
@ -373,14 +373,14 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
}
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
jobject o = NULL;
jobject o = nullptr;
PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
__ movoop(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
}
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
Metadata* o = NULL;
Metadata* o = nullptr;
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
__ mov_metadata(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
@ -400,7 +400,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
int LIR_Assembler::emit_exception_handler() {
// generate code for exception handler
address handler_base = __ start_a_stub(exception_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
// not enough space left for the handler
bailout("exception handler overflow");
return -1;
@ -450,7 +450,7 @@ int LIR_Assembler::emit_unwind_handler() {
}
// Perform needed unlocking
MonitorExitStub* stub = NULL;
MonitorExitStub* stub = nullptr;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::rax_opr);
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
@ -483,7 +483,7 @@ int LIR_Assembler::emit_unwind_handler() {
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly
if (stub != NULL) {
if (stub != nullptr) {
stub->emit_code(this);
}
@ -494,7 +494,7 @@ int LIR_Assembler::emit_unwind_handler() {
int LIR_Assembler::emit_deopt_handler() {
// generate code for exception handler
address handler_base = __ start_a_stub(deopt_handler_size());
if (handler_base == NULL) {
if (handler_base == nullptr) {
// not enough space left for the handler
bailout("deopt handler overflow");
return -1;
@ -541,7 +541,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
guarantee(info != NULL, "Shouldn't be NULL");
guarantee(info != nullptr, "Shouldn't be null");
int offset = __ offset();
#ifdef _LP64
const Register poll_addr = rscratch1;
@ -733,7 +733,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (c->as_jobject() == nullptr) {
if (UseCompressedOops && !wide) {
__ movl(as_Address(addr), NULL_WORD);
} else {
@ -799,7 +799,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
ShouldNotReachHere();
};
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check(null_check_here, info);
}
}
@ -947,7 +947,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
LIR_Address* to_addr = dest->as_address_ptr();
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
Register compressed_src = rscratch1;
if (is_reference_type(type)) {
@ -1043,7 +1043,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
assert(base != from_hi, "can't be");
assert(index == noreg || (index != base && index != from_hi), "can't handle this");
__ movl(as_Address_hi(to_addr), from_hi);
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, lir_patch_high, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
@ -1052,7 +1052,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
} else {
assert(index == noreg || (index != base && index != from_lo), "can't handle this");
__ movl(as_Address_lo(to_addr), from_lo);
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, lir_patch_low, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
@ -1080,7 +1080,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
default:
ShouldNotReachHere();
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check(null_check_here, info);
}
@ -1198,12 +1198,12 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
break;
}
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
assert(from_addr.disp() != 0, "must have");
}
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
@ -1270,7 +1270,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
// addresses with 2 registers are only formed as a result of
// array access so this code will never have to deal with
// patches or null checks.
assert(info == NULL && patch == NULL, "must be");
assert(info == nullptr && patch == nullptr, "must be");
__ lea(to_hi, as_Address(addr));
__ movl(to_lo, Address(to_hi, 0));
__ movl(to_hi, Address(to_hi, BytesPerWord));
@ -1278,7 +1278,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
assert(base != to_hi, "can't be");
assert(index == noreg || (index != base && index != to_hi), "can't handle this");
__ movl(to_hi, as_Address_hi(addr));
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, lir_patch_high, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
@ -1287,7 +1287,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
} else {
assert(index == noreg || (index != base && index != to_lo), "can't handle this");
__ movl(to_lo, as_Address_lo(addr));
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, lir_patch_low, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
@ -1339,7 +1339,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
ShouldNotReachHere();
}
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
@ -1401,18 +1401,18 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
if (op->block() != NULL) _branch_target_blocks.append(op->block());
if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
if (op->block() != nullptr) _branch_target_blocks.append(op->block());
if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif
if (op->cond() == lir_cond_always) {
if (op->info() != NULL) add_debug_info_for_branch(op->info());
if (op->info() != nullptr) add_debug_info_for_branch(op->info());
__ jmp (*(op->label()));
} else {
Assembler::Condition acond = Assembler::zero;
if (op->code() == lir_cond_float_branch) {
assert(op->ublock() != NULL, "must have unordered successor");
assert(op->ublock() != nullptr, "must have unordered successor");
__ jcc(Assembler::parity, *(op->ublock()->label()));
switch(op->cond()) {
case lir_cond_equal: acond = Assembler::equal; break;
@ -1569,7 +1569,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
}
// IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
assert(op->stub() != NULL, "stub required");
assert(op->stub() != nullptr, "stub required");
__ cmpl(dest->as_register(), 0x80000000);
__ jcc(Assembler::equal, *op->stub()->entry());
__ bind(*op->stub()->continuation());
@ -1682,17 +1682,17 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
// check if it needs to be profiled
ciMethodData* md = NULL;
ciProfileData* data = NULL;
ciMethodData* md = nullptr;
ciProfileData* data = nullptr;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
assert(method != nullptr, "Should have method");
int bci = op->profiled_bci();
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data != nullptr, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
Label profile_cast_success, profile_cast_failure;
@ -1798,7 +1798,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
}
} else {
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
@ -1842,17 +1842,17 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
CodeStub* stub = op->stub();
// check if it needs to be profiled
ciMethodData* md = NULL;
ciProfileData* data = NULL;
ciMethodData* md = nullptr;
ciProfileData* data = nullptr;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
assert(method != nullptr, "Should have method");
int bci = op->profiled_bci();
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data != nullptr, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
Label profile_cast_success, profile_cast_failure, done;
@ -1882,7 +1882,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
@ -2021,7 +2021,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
} else if (opr1->is_stack()) {
stack2reg(opr1, result, result->type());
} else if (opr1->is_constant()) {
const2reg(opr1, result, lir_patch_none, NULL);
const2reg(opr1, result, lir_patch_none, nullptr);
} else {
ShouldNotReachHere();
}
@ -2053,7 +2053,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
} else if (opr2->is_stack()) {
stack2reg(opr2, result, result->type());
} else if (opr2->is_constant()) {
const2reg(opr2, result, lir_patch_none, NULL);
const2reg(opr2, result, lir_patch_none, nullptr);
} else {
ShouldNotReachHere();
}
@ -2063,7 +2063,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
if (left->is_single_cpu()) {
assert(left == dest, "left and dest must be equal");
@ -2259,7 +2259,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
raddr = frame_map()->address_for_slot(right->single_stack_ix());
} else if (right->is_constant()) {
address const_addr = float_constant(right->as_jfloat());
assert(const_addr != NULL, "incorrect float/double constant maintenance");
assert(const_addr != nullptr, "incorrect float/double constant maintenance");
// hack for now
raddr = __ as_Address(InternalAddress(const_addr));
} else {
@ -2666,10 +2666,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
if (c->type() == T_INT) {
__ cmpl(reg1, c->as_jint());
} else if (c->type() == T_METADATA) {
// All we need for now is a comparison with NULL for equality.
// All we need for now is a comparison with null for equality.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = c->as_metadata();
if (m == NULL) {
if (m == nullptr) {
__ cmpptr(reg1, NULL_WORD);
} else {
ShouldNotReachHere();
@ -2677,7 +2677,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
} else if (is_reference_type(c->type())) {
// In 64bit oops are single register
jobject o = c->as_jobject();
if (o == NULL) {
if (o == nullptr) {
__ cmpptr(reg1, NULL_WORD);
} else {
__ cmpoop(reg1, o, rscratch1);
@ -2687,7 +2687,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
}
// cpu register - address
} else if (opr2->is_address()) {
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
__ cmpl(reg1, as_Address(opr2->as_address_ptr()));
@ -2737,7 +2737,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
__ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
} else if (opr2->is_address()) {
// xmm register - address
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
__ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
@ -2758,7 +2758,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
__ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
} else if (opr2->is_address()) {
// xmm register - address
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
__ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
@ -2781,7 +2781,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
__ movoop(rscratch1, c->as_jobject());
}
#endif // LP64
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
}
// special case: address - constant
@ -2887,7 +2887,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size());
if (stub == NULL) {
if (stub == nullptr) {
bailout("static call stub overflow");
return;
}
@ -2897,7 +2897,7 @@ void LIR_Assembler::emit_static_call_stub() {
// make sure that the displacement word of the call ends up word aligned
__ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
__ relocate(static_stub_Relocation::spec(call_pc));
__ mov_metadata(rbx, (Metadata*)NULL);
__ mov_metadata(rbx, (Metadata*)nullptr);
// must be set to -1 at code generation time
assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
@ -3073,11 +3073,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (is_reference_type(basic_type)) basic_type = T_OBJECT;
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL) {
if (default_type == nullptr) {
// save outgoing arguments on stack in case call to System.arraycopy is needed
// HACK ALERT. This code used to push the parameters in a hardwired fashion
// for interpreter calling conventions. Now we have to do it in new style conventions.
@ -3096,7 +3096,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
address copyfunc_addr = StubRoutines::generic_arraycopy();
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
// pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
#ifdef _LP64
@ -3169,7 +3169,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
return;
}
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
Address::ScaleFactor scale;
@ -3199,7 +3199,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// length and pos's are all sign extended at this point on 64bit
// test for NULL
// test for null
if (flags & LIR_OpArrayCopy::src_null_check) {
__ testptr(src, src);
__ jcc(Assembler::zero, *stub->entry());
@ -3280,7 +3280,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ load_klass(src, src, tmp_load_klass);
__ load_klass(dst, dst, tmp_load_klass);
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
__ push(src);
__ push(dst);
@ -3296,7 +3296,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ pop(src);
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
if (copyfunc_addr != NULL) { // use stub if available
if (copyfunc_addr != nullptr) { // use stub if available
// src is not a sub class of dst so we have to do a
// per-element check.
@ -3501,7 +3501,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
if (UseHeavyMonitors) {
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj);
}
@ -3510,7 +3510,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
if (op->info() != NULL) {
if (op->info() != nullptr) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
// done
@ -3528,7 +3528,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
Register result = op->result_opr()->as_pointer_register();
CodeEmitInfo* info = op->info();
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
@ -3549,9 +3549,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity");
assert(md != nullptr, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
__ mov_metadata(mdo, md->constant_encoding());
@ -3564,7 +3564,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert_different_registers(mdo, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
@ -3589,7 +3589,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
if (receiver == nullptr) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@ -3626,7 +3626,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
Label update, next, none;
bool do_null = !not_null;
bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
assert(do_null || do_update, "why are we here?");
@ -3661,7 +3661,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
if (do_update) {
#ifdef ASSERT
if (exact_klass != NULL) {
if (exact_klass != nullptr) {
Label ok;
__ load_klass(tmp, tmp, tmp_load_klass);
__ push(tmp);
@ -3674,8 +3674,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
}
#endif
if (!no_conflict) {
if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
if (exact_klass != NULL) {
if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
if (exact_klass != nullptr) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
} else {
__ load_klass(tmp, tmp, tmp_load_klass);
@ -3703,7 +3703,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ jccb(Assembler::zero, next);
}
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
__ movptr(tmp, mdo_addr);
@ -3723,7 +3723,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
}
} else {
// There's a single possible klass at this profile point
assert(exact_klass != NULL, "should be");
assert(exact_klass != nullptr, "should be");
if (TypeEntries::is_type_none(current_klass)) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ xorptr(tmp, mdo_addr);
@ -3754,7 +3754,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
// first time here. Set profile type.
__ movptr(mdo_addr, tmp);
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
__ movptr(tmp, mdo_addr);
@ -3861,7 +3861,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
assert(src->is_address(), "must be an address");
assert(dest->is_register(), "must be a register");
PatchingStub* patch = NULL;
PatchingStub* patch = nullptr;
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
}
@ -3870,7 +3870,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
LIR_Address* addr = src->as_address_ptr();
__ lea(reg, as_Address(addr));
if (patch != NULL) {
if (patch != nullptr) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
}
@ -3880,7 +3880,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
assert(!tmp->is_valid(), "don't need temporary");
__ call(RuntimeAddress(dest));
if (info != NULL) {
if (info != nullptr) {
add_call_info_here(info);
}
__ post_call_nop();
@ -3890,7 +3890,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
assert(type == T_LONG, "only for volatile long fields");
if (info != NULL) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}


@ -125,7 +125,7 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
return false;
}
Constant* c = v->as_Constant();
if (c && c->state_before() == NULL) {
if (c && c->state_before() == nullptr) {
// constants of any type can be stored directly, except for
// unloaded object constants.
return true;
@ -143,7 +143,7 @@ bool LIRGenerator::can_inline_as_constant(Value v) const {
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
if (c->type() == T_LONG) return false;
return c->type() != T_OBJECT || c->as_jobject() == NULL;
return c->type() != T_OBJECT || c->as_jobject() == nullptr;
}
@ -312,7 +312,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
CodeEmitInfo* info_for_exception = NULL;
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
@ -383,7 +383,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
bool must_load_right = false;
if (right.is_constant()) {
LIR_Const* c = right.result()->as_constant_ptr();
assert(c != NULL, "invalid constant");
assert(c != nullptr, "invalid constant");
assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
if (c->type() == T_FLOAT) {
@ -429,7 +429,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
left.load_item_force(cc->at(0));
right.load_item_force(cc->at(1));
address entry = NULL;
address entry = nullptr;
switch (x->op()) {
case Bytecodes::_frem:
entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
@ -499,7 +499,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
address entry = NULL;
address entry = nullptr;
switch (x->op()) {
case Bytecodes::_lrem:
entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
@ -527,7 +527,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
right.load_item();
LIR_Opr reg = FrameMap::long0_opr;
arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
} else {
@ -539,7 +539,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
// don't load constants to save register
right.load_nonconstant();
rlock_result(x);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
}
}
@ -583,7 +583,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
info = nullptr;
}
LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
if (x->op() == Bytecodes::_irem) {
@ -650,7 +650,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
// when an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands();
}
@ -691,7 +691,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {
void LIRGenerator::do_LogicOp(LogicOp* x) {
// when an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands();
}
@ -867,7 +867,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
LIR_Opr calc_result = rlock_result(x);
LIR_Opr result_reg = result_register_for(x->type());
CallingConvention* cc = NULL;
CallingConvention* cc = nullptr;
if (x->id() == vmIntrinsics::_dpow) {
LIRItem value1(x->argument_at(1), this);
@ -892,49 +892,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
result_reg = tmp;
switch(x->id()) {
case vmIntrinsics::_dexp:
if (StubRoutines::dexp() != NULL) {
if (StubRoutines::dexp() != nullptr) {
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog:
if (StubRoutines::dlog() != NULL) {
if (StubRoutines::dlog() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog10:
if (StubRoutines::dlog10() != NULL) {
if (StubRoutines::dlog10() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dpow:
if (StubRoutines::dpow() != NULL) {
if (StubRoutines::dpow() != nullptr) {
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dsin:
if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
if (VM_Version::supports_sse2() && StubRoutines::dsin() != nullptr) {
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dcos:
if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
if (VM_Version::supports_sse2() && StubRoutines::dcos() != nullptr) {
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dtan:
if (StubRoutines::dtan() != NULL) {
if (StubRoutines::dtan() != nullptr) {
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
@ -945,49 +945,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
#else
switch (x->id()) {
case vmIntrinsics::_dexp:
if (StubRoutines::dexp() != NULL) {
if (StubRoutines::dexp() != nullptr) {
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog:
if (StubRoutines::dlog() != NULL) {
if (StubRoutines::dlog() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog10:
if (StubRoutines::dlog10() != NULL) {
if (StubRoutines::dlog10() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dpow:
if (StubRoutines::dpow() != NULL) {
if (StubRoutines::dpow() != nullptr) {
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dsin:
if (StubRoutines::dsin() != NULL) {
if (StubRoutines::dsin() != nullptr) {
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dcos:
if (StubRoutines::dcos() != NULL) {
if (StubRoutines::dcos() != nullptr) {
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dtan:
if (StubRoutines::dtan() != NULL) {
if (StubRoutines::dtan() != nullptr) {
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
@ -1259,7 +1259,7 @@ void LIRGenerator::do_Convert(Convert* x) {
// arguments of lir_convert
LIR_Opr conv_input = input;
LIR_Opr conv_result = result;
ConversionStub* stub = NULL;
ConversionStub* stub = nullptr;
if (fixed_input) {
conv_input = fixed_register_for(input->type());
@ -1335,7 +1335,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIRItem length(x->length(), this);
// in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
// and therefore provide the state before the parameters have been consumed
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
}
@ -1368,14 +1368,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims();
int i = dims->length();
LIRItemList* items = new LIRItemList(i, i, NULL);
LIRItemList* items = new LIRItemList(i, i, nullptr);
while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
@ -1424,7 +1424,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization)
@ -1439,10 +1439,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
CodeStub* stub;
if (x->is_incompatible_class_change_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
} else if (x->is_invokespecial_receiver_check()) {
assert(patching_info == NULL, "can't patch this");
assert(patching_info == nullptr, "can't patch this");
stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
} else {
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
@ -1464,7 +1464,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
// result and test object may not be in same register
LIR_Opr reg = rlock_result(x);
CodeEmitInfo* patching_info = NULL;
CodeEmitInfo* patching_info = nullptr;
if ((!x->klass()->is_loaded() || PatchALot)) {
// must do this before locking the destination register as an oop register
patching_info = state_for(x, x->state_before());


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ void LinearScan::allocate_fpu_stack() {
// (To minimize the amount of work we have to do if we have to merge FPU stacks)
if (ComputeExactFPURegisterUsage) {
Interval* intervals_in_register, *intervals_in_memory;
create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL);
create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, nullptr);
// ignore memory intervals by overwriting intervals_in_memory
// the dummy interval is needed to enforce the walker to walk until the given id:
@ -109,14 +109,14 @@ void LinearScan::allocate_fpu_stack() {
FpuStackAllocator alloc(ir()->compilation(), this);
_fpu_stack_allocator = &alloc;
alloc.allocate();
_fpu_stack_allocator = NULL;
_fpu_stack_allocator = nullptr;
}
FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
: _compilation(compilation)
, _allocator(allocator)
, _lir(NULL)
, _lir(nullptr)
, _pos(-1)
, _sim(compilation)
, _temp_sim(compilation)
@ -136,14 +136,14 @@ void FpuStackAllocator::allocate() {
}
#endif
assert(fpu_stack_state != NULL ||
block->end()->as_Base() != NULL ||
assert(fpu_stack_state != nullptr ||
block->end()->as_Base() != nullptr ||
block->is_set(BlockBegin::exception_entry_flag),
"FPU stack state must be present due to linear-scan order for FPU stack allocation");
// note: exception handler entries always start with an empty fpu stack
// because stack merging would be too complicated
if (fpu_stack_state != NULL) {
if (fpu_stack_state != nullptr) {
sim()->read_state(fpu_stack_state);
} else {
sim()->clear();
@ -186,7 +186,7 @@ void FpuStackAllocator::allocate_block(BlockBegin* block) {
LIR_Op2* op2 = op->as_Op2();
LIR_OpCall* opCall = op->as_OpCall();
if (branch != NULL && branch->block() != NULL) {
if (branch != nullptr && branch->block() != nullptr) {
if (!processed_merge) {
// propagate stack at first branch to a successor
processed_merge = true;
@ -195,11 +195,11 @@ void FpuStackAllocator::allocate_block(BlockBegin* block) {
assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
}
} else if (op1 != NULL) {
} else if (op1 != nullptr) {
handle_op1(op1);
} else if (op2 != NULL) {
} else if (op2 != nullptr) {
handle_op2(op2);
} else if (opCall != NULL) {
} else if (opCall != nullptr) {
handle_opCall(opCall);
}
@ -256,7 +256,7 @@ void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
}
#endif
if (xhandler->entry_code() == NULL) {
if (xhandler->entry_code() == nullptr) {
// need entry code to clear FPU stack
LIR_List* entry_code = new LIR_List(_compilation);
entry_code->jump(xhandler->entry_block());
@ -280,7 +280,7 @@ void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
switch (op->code()) {
case lir_move:
assert(op->as_Op1() != NULL, "must be LIR_Op1");
assert(op->as_Op1() != nullptr, "must be LIR_Op1");
assert(pos() != insts->length() - 1, "must not be last operation");
handle_op1((LIR_Op1*)op);
@ -1042,7 +1042,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
intArray* state = sux->fpu_stack_state();
LIR_List* instrs = new LIR_List(_compilation);
if (state != NULL) {
if (state != nullptr) {
// Merge with a successors that already has a FPU stack state
// the block must only have one successor because critical edges must been split
FpuStackSim* cur_sim = sim();
@ -1088,7 +1088,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
}
// check if new state is same
if (sux->fpu_stack_state() != NULL) {
if (sux->fpu_stack_state() != nullptr) {
intArray* sux_state = sux->fpu_stack_state();
assert(state->length() == sux_state->length(), "overwriting existing stack state");
for (int j = 0; j < state->length(); j++) {
@ -1114,7 +1114,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
BlockBegin* sux = block->sux_at(i);
intArray* sux_state = sux->fpu_stack_state();
assert(sux_state != NULL, "no fpu state");
assert(sux_state != nullptr, "no fpu state");
assert(cur_state->length() == sux_state->length(), "incorrect length");
for (int i = 0; i < cur_state->length(); i++) {
assert(cur_state->at(i) == sux_state->at(i), "element not equal");


@ -89,7 +89,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
subptr(hdr, rsp);
andptr(hdr, aligned_mask - (int)os::vm_page_size());
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)
// location (null in the displaced hdr location indicates recursive locking)
movptr(Address(disp_hdr, 0), hdr);
// otherwise we don't care about the result and handle locking via runtime call
jcc(Assembler::notZero, slow_case);
@ -110,7 +110,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// load displaced header
movptr(hdr, Address(disp_hdr, 0));
// if the loaded hdr is NULL we had recursive locking
// if the loaded hdr is null we had recursive locking
testptr(hdr, hdr);
// if we had recursive locking, we are done
jcc(Assembler::zero, done);
@ -279,7 +279,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
verify_oop(receiver);
// explicit NULL check not needed since load from [klass_offset] causes a trap
// explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
@ -322,7 +322,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
// C1 code is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */);
bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -119,7 +119,7 @@
void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN;
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
void load_parameter(int offset_in_words, Register reg);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
int call_offset = -1;
if (!align_stack) {
set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1);
set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
} else {
address the_pc = pc();
call_offset = offset();
@ -673,7 +673,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// Save registers, if required.
OopMapSet* oop_maps = new OopMapSet();
OopMap* oop_map = NULL;
OopMap* oop_map = nullptr;
switch (id) {
case forward_exception_id:
// We're handling an exception in the context of a compiled frame.
@ -870,7 +870,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
const int num_rt_args = 2; // thread + dummy
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
OopMap* oop_map = save_live_registers(sasm, num_rt_args);
@ -886,7 +886,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
__ get_thread(thread);
__ push(thread);
#endif // _LP64
__ set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1);
__ set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
// do the call
__ call(RuntimeAddress(target));
OopMapSet* oop_maps = new OopMapSet();
@ -1000,7 +1000,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
bool save_fpu_registers = true;
// stub code & info for the different stubs
OopMapSet* oop_maps = NULL;
OopMapSet* oop_maps = nullptr;
switch (id) {
case forward_exception_id:
{
@ -1267,7 +1267,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
Label miss;
__ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
__ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, nullptr, &miss);
// fallthrough on success:
__ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
@ -1342,7 +1342,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ leave();
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
}
@ -1492,7 +1492,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
restore_live_registers(sasm);
__ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ int C2SafepointPollStub::max_size() const {
}
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

View File

@ -130,7 +130,7 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool
if (!is_stub) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
#ifdef _LP64
if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
if (BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
// We put the non-hot code of the nmethod entry barrier out-of-line in a stub.
Label dummy_slow_path;
Label dummy_continuation;
@ -147,7 +147,7 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool
}
#else
// Don't bother with out-of-line nmethod entry barrier stub for x86_32.
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */);
bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */);
#endif
}
}
@ -228,7 +228,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
imulptr(scrReg, scrReg, RTMAbortRatio);
cmpptr(tmpReg, scrReg);
jccb(Assembler::below, L_check_always_rtm1);
if (method_data != NULL) {
if (method_data != nullptr) {
// set rtm_state to "no rtm" in MDO
mov_metadata(tmpReg, method_data);
lock();
@ -242,7 +242,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
jccb(Assembler::below, L_done);
if (method_data != NULL) {
if (method_data != nullptr) {
// set rtm_state to "always rtm" in MDO
mov_metadata(tmpReg, method_data);
lock();
@ -260,7 +260,7 @@ void C2_MacroAssembler::rtm_profiling(Register abort_status_Reg,
Metadata* method_data,
bool profile_rtm) {
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
assert(rtm_counters != nullptr, "should not be null when profiling RTM");
// update rtm counters based on rax value at abort
// reads abort_status_Reg, updates flags
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
@ -270,7 +270,7 @@ void C2_MacroAssembler::rtm_profiling(Register abort_status_Reg,
if (RTMRetryCount > 0) {
push(abort_status_Reg);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
assert(rtm_counters != nullptr, "should not be null when profiling RTM");
rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
// restore abort status
if (RTMRetryCount > 0) {
@ -356,7 +356,7 @@ void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Regi
// tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
assert(stack_rtm_counters != nullptr, "should not be null when profiling RTM");
atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
@ -416,7 +416,7 @@ void C2_MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, R
// tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
assert(rtm_counters != nullptr, "should not be null when profiling RTM");
atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
@ -661,7 +661,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
lock();
cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
// If we weren't able to swing _owner from NULL to the BasicLock
// If we weren't able to swing _owner from null to the BasicLock
// then take the slow path.
jccb (Assembler::notZero, NO_COUNT);
// update _owner from BasicLock to thread

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ int IntelJccErratum::jcc_erratum_taint_node(MachNode* node, PhaseRegAlloc* regal
int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegAlloc* regalloc) {
ResourceMark rm;
int nop_size = 0;
MachNode* last_m = NULL;
MachNode* last_m = nullptr;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
const Block* const block = cfg->get_block(i);
@ -86,7 +86,7 @@ int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegA
}
}
}
last_m = NULL;
last_m = nullptr;
} else {
last_m = m;
}
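
The tagging above feeds a later padding pass: the point of tracking last_m is that an affected jcc (or a macro-fused cmp/test plus jcc pair) must not cross or end on a 32-byte boundary, per the Intel JCC erratum mitigation. A minimal sketch of that boundary test, assuming an illustrative free-standing helper and the 32-byte chunk size (neither is the class's real API):

#include <stdint.h>

// Illustrative only: with an exclusive end, "crosses or ends on a 32-byte
// boundary" collapses to a single chunk comparison.
static bool needs_jcc_padding_sketch(uintptr_t start, uintptr_t end_exclusive) {
  return (start / 32) != (end_exclusive / 32);
}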

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// movq rbx, 0
// jmp -5 # to self
if (mark == NULL) {
if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
}
@ -50,8 +50,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
if (base == NULL) {
return NULL; // CodeBuffer::expand failed.
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed.
}
// Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
@ -82,7 +82,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
guarantee(stub != nullptr, "stub not found");
if (TraceICs) {
ResourceMark rm;
@ -108,7 +108,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(stub != nullptr, "stub not found");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
method_holder->set_data(0);
@ -128,12 +128,12 @@ void CompiledDirectStaticCall::verify() {
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob((address) _call);
assert(cb != NULL, "sanity");
assert(cb != nullptr, "sanity");
#endif
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

View File

@ -140,7 +140,7 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
|| (f.unextended_sp() == f.sp()), "");
assert(f.fp() > (intptr_t*)f.at(frame::interpreter_frame_initial_sp_offset), "");
// at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts
// at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts
*hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp();
// Make sure that locals is already relativized.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@
// the perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) {
static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
return here;
}

View File

@ -71,7 +71,7 @@ public:
_captured_state_mask(captured_state_mask),
_frame_complete(0),
_frame_size_slots(0),
_oop_maps(NULL) {
_oop_maps(nullptr) {
}
void generate();

View File

@ -87,7 +87,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code
if (_cb != NULL ) {
if (_cb != nullptr ) {
// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod
@ -113,10 +113,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return fp_safe;
}
intptr_t* sender_sp = NULL;
intptr_t* sender_unextended_sp = NULL;
address sender_pc = NULL;
intptr_t* saved_fp = NULL;
intptr_t* sender_sp = nullptr;
intptr_t* sender_unextended_sp = nullptr;
address sender_pc = nullptr;
intptr_t* saved_fp = nullptr;
if (is_interpreted_frame()) {
// fp must be safe
@ -181,7 +181,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
if (sender_pc == nullptr || sender_blob == nullptr) {
return false;
}
@ -214,7 +214,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) {
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
@ -256,7 +256,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ( (address) this->fp()[return_addr_offset] == NULL) return false;
if ( (address) this->fp()[return_addr_offset] == nullptr) return false;
// could try and do some more potential verification of native frame if we could think of some...
@ -284,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
_pc = original_pc;
@ -356,7 +356,7 @@ void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
}
frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@ -383,11 +383,11 @@ bool frame::upcall_stub_frame_is_first() const {
assert(is_upcall_stub_frame(), "must be optimized entry frame");
UpcallStub* blob = _cb->as_upcall_stub();
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
return jfa->last_Java_sp() == NULL;
return jfa->last_Java_sp() == nullptr;
}
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
UpcallStub* blob = _cb->as_upcall_stub();
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
@ -432,9 +432,9 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
if (_cb != NULL) {
if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) {
if (sender_cm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
@ -560,7 +560,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
} else {
oop* obj_p = (oop*)tos_addr;
obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
}
assert(Universe::is_in_heap_or_null(obj), "sanity check");
*oop_result = obj;
@ -659,10 +659,10 @@ frame::frame(void* sp, void* fp, void* pc) {
void JavaFrameAnchor::make_walkable() {
// last frame set?
if (last_Java_sp() == NULL) return;
if (last_Java_sp() == nullptr) return;
// already walkable?
if (walkable()) return;
vmassert(last_Java_pc() == NULL, "already walkable");
vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong");
}

View File

@ -39,13 +39,13 @@
// Constructors:
inline frame::frame() {
_pc = NULL;
_sp = NULL;
_unextended_sp = NULL;
_fp = NULL;
_cb = NULL;
_pc = nullptr;
_sp = nullptr;
_unextended_sp = nullptr;
_fp = nullptr;
_cb = nullptr;
_deopt_state = unknown;
_oop_map = NULL;
_oop_map = nullptr;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
}
@ -55,11 +55,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_unextended_sp = sp;
_fp = fp;
_pc = pc;
_oop_map = NULL;
_oop_map = nullptr;
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
setup(pc);
}
@ -68,10 +68,10 @@ inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -91,10 +91,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = cb;
_oop_map = NULL;
assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
_oop_map = nullptr;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
@ -115,7 +115,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
// In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
if (cb != NULL) {
if (cb != nullptr) {
setup(pc);
}
#ifdef ASSERT
@ -132,10 +132,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob_fast(pc);
_oop_map = NULL;
assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
_oop_map = nullptr;
assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
@ -160,19 +160,19 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// UPDATE: this constructor is only used by trace_method_handle_stub() now.
// assert(_pc != NULL, "no pc?");
// assert(_pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
_oop_map = NULL;
_oop_map = nullptr;
}
// Accessors
@ -187,19 +187,19 @@ inline bool frame::equal(frame other) const {
}
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; }
inline intptr_t* frame::link() const { return *(intptr_t **)addr_at(link_offset); }
inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL;
return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}
inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
@ -208,7 +208,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); retu
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }
inline intptr_t* frame::real_fp() const {
if (_cb != NULL) {
if (_cb != nullptr) {
// use the frame size if valid
int size = _cb->frame_size();
if (size > 0) {
@ -232,7 +232,7 @@ inline int frame::compiled_frame_stack_argsize() const {
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
assert(mask != NULL, "");
assert(mask != nullptr, "");
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
@ -285,7 +285,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL) {
if (last_sp == nullptr) {
return sp();
} else {
// sp() may have been extended or shrunk by an adapter. At least
@ -323,13 +323,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
return *result_adr;
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location");
guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj;
}
@ -343,17 +343,17 @@ inline int frame::sender_sp_ret_address_offset() {
}
inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;
if (_cb->oop_maps() != NULL) {
if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) {
if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
return NULL;
return nullptr;
}
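
The fast path above relies on one packing fact that is visible in the shift-and-mask: the post-call nop's 32-bit displacement carries the oop-map slot index in its top byte. A one-line sketch of that decode (illustrative helper, not a JVM API):

#include <stdint.h>

// Illustrative only: mirrors ((nop->displacement() >> 24) & 0xff) above.
static int oopmap_slot_from_displacement(int32_t displacement) {
  return (displacement >> 24) & 0xff;
}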
//------------------------------------------------------------------------------
@ -383,7 +383,7 @@ inline frame frame::sender_raw(RegisterMap* map) const {
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
if (_cb != NULL) return sender_for_compiled_frame(map);
if (_cb != nullptr) return sender_for_compiled_frame(map);
// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
@ -391,7 +391,7 @@ inline frame frame::sender_raw(RegisterMap* map) const {
}
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
// frame owned by optimizing compiler
assert(_cb->frame_size() > 0, "must have non-zero frame size");
@ -412,13 +412,13 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != NULL) {
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);
}
} else {
assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), "");
assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
}
// Since the prolog does the save and restore of EBP there is no oopmap

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -230,7 +230,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
//
// If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to
@ -291,12 +291,12 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
__ jcc(Assembler::equal, done);
// crosses regions, storing NULL?
// crosses regions, storing null?
__ cmpptr(new_val, NULL_WORD);
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, is card already dirty?
// storing region crossing non-null, is card already dirty?
const Register card_addr = tmp;
const Register cardtable = tmp2;
@ -316,7 +316,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// storing a region crossing, non-null oop, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), G1CardTable::dirty_card_val());
@ -519,7 +519,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Label enqueued;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// At this point we know new_value is non-null and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
@ -549,7 +549,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ cmpb(Address(card_addr, 0), CardTable::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// storing region crossing non-null, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), CardTable::dirty_card_val());
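
Condensing the filters from the stubs above into scalar form, as a sketch (helper and parameter names are illustrative, and the real barrier performs further checks not visible in these hunks):

#include <stdint.h>

// Illustrative only: the filter chain in front of "dirty card and log".
static bool g1_post_barrier_must_log(uintptr_t field_addr, uintptr_t new_val,
                                     int log_region_bytes, uint8_t card, uint8_t dirty_val) {
  if (((field_addr ^ new_val) >> log_region_bytes) == 0) return false; // same region
  if (new_val == 0)                                      return false; // storing null
  if (card == dirty_val)                                 return false; // card already dirty
  return true;                                                         // dirty card and log
}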

View File

@ -382,7 +382,7 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register th
#ifdef _LP64
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) {
if (bs_nm == nullptr) {
return;
}
Register thread = r15_thread;
@ -396,7 +396,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
uintptr_t after_cmp = (uintptr_t)__ pc();
guarantee(after_cmp - before_cmp == 8, "Wrong assumed instruction length");
if (slow_path != NULL) {
if (slow_path != nullptr) {
__ jcc(Assembler::notEqual, *slow_path);
__ bind(*continuation);
} else {
@ -409,7 +409,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
#else
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, Label*) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) {
if (bs_nm == nullptr) {
return;
}
@ -430,7 +430,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, La
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) {
if (bs == nullptr) {
return;
}

View File

@ -264,7 +264,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
//
// If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to
@ -703,7 +703,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
// Before reaching to resolve sequence, see if we can avoid the whole shebang
// with filters.
// Filter: when offending in-memory value is NULL, the failure is definitely legitimate
// Filter: when offending in-memory value is null, the failure is definitely legitimate
__ testptr(oldval, oldval);
__ jcc(Assembler::zero, L_failure);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached
// because
// (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
// assert(cached_value == NULL || cached_oop->is_perm(), "must be perm oop");
// assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
masm->jump(ExternalAddress(entry_point));
}

View File

@ -268,7 +268,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL");
" last_sp != null");
bind(L);
}
#endif
@ -300,7 +300,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL");
" last_sp != nullptr");
bind(L);
}
#endif /* ASSERT */
@ -399,7 +399,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
testptr(tmp, tmp);
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == nullptr) exit;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
@ -1377,7 +1377,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
push(rbx);
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
// Test MDO to avoid the call if it is null.
movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
testptr(rax, rax);
jcc(Assembler::zero, set_mdp);
@ -1760,7 +1760,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is NULL.
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
@ -1776,13 +1776,13 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != NULL) {
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != NULL) {
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
@ -1988,7 +1988,7 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Ad
incrementl(scratch, InvocationCounter::count_increment);
movl(counter_addr, scratch);
andl(scratch, mask);
if (where != NULL) {
if (where != nullptr) {
jcc(Assembler::zero, *where);
}
}
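
What increment_mask_and_jump emits reduces to a small scalar test; a sketch under the assumption that mask is whatever the caller passed for the profiling threshold (the helper is illustrative):

#include <stdint.h>

// Illustrative only: the overflow test behind incrementl / andl / jcc(zero).
static bool counter_overflowed(uint32_t& counter, uint32_t increment, uint32_t mask) {
  counter += increment;            // incrementl(scratch, InvocationCounter::count_increment)
  return (counter & mask) == 0;    // andl(scratch, mask); jcc(Assembler::zero, *where)
}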

View File

@ -179,7 +179,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
// null last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
NOT_LP64(empty_FPU_stack());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,10 @@ public:
void clear(void) {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_last_Java_fp = nullptr;
_last_Java_pc = nullptr;
}
void copy(JavaFrameAnchor* src) {
@ -50,11 +50,11 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data
//
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing
//
if (_last_Java_sp != src->_last_Java_sp)
_last_Java_sp = NULL;
_last_Java_sp = nullptr;
_last_Java_fp = src->_last_Java_fp;
_last_Java_pc = src->_last_Java_pc;
@ -62,7 +62,7 @@ public:
_last_Java_sp = src->_last_Java_sp;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; }
void make_walkable();
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
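
The ordering that clear() and copy() are careful about can be summarized with a toy stand-in (a sketch only, not the real JavaFrameAnchor; the real code also worries about fences, see the "// fence?" comment): sp is invalidated first and re-published last, so anyone who observes a non-null sp also observes a matching fp/pc.

#include <stdint.h>

// Illustrative only.
struct AnchorSketch {
  intptr_t* sp = nullptr; intptr_t* fp = nullptr; unsigned char* pc = nullptr;
  void copy_from(const AnchorSketch& src) {
    if (sp != src.sp) sp = nullptr;  // invalidate first, only if the frame changes
    fp = src.fp;
    pc = src.pc;
    sp = src.sp;                     // publish last: the anchor becomes walkable again
  }
};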

View File

@ -51,7 +51,7 @@ GetDoubleField_t JNI_FastGetField::jni_fast_GetDoubleField_fp;
// between loads, which is much more efficient than lfence.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name = NULL;
const char *name = nullptr;
switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -128,7 +128,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr = NULL;
address slow_case_addr = nullptr;
switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -264,7 +264,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name = NULL;
const char *name = nullptr;
switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -343,7 +343,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr = NULL;
address slow_case_addr = nullptr;
switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;

View File

@ -49,7 +49,7 @@ static const Register roffset = r10;
static const Register rcounter = r11;
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name = NULL;
const char *name = nullptr;
switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -107,7 +107,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr = NULL;
address slow_case_addr = nullptr;
switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -150,7 +150,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name = NULL;
const char *name = nullptr;
switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -199,7 +199,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr = NULL;
address slow_case_addr = nullptr;
switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
}
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, methodHandle& method, jint pc_offset, JVMCI_TRAPS) {
NativeCall* call = NULL;
NativeCall* call = nullptr;
switch (_next_call_type) {
case INLINE_INVOKE:
return;

View File

@ -1036,7 +1036,7 @@ void MacroAssembler::object_move(OopMap* map,
Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
// See if oop is NULL if it is we need no handle
// See if oop is null if it is we need no handle
if (src.first()->is_stack()) {
@ -1049,12 +1049,12 @@ void MacroAssembler::object_move(OopMap* map,
cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
lea(rHandle, Address(rbp, reg2offset_in(src.first())));
// conditionally move a NULL
// conditionally move a null
cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
} else {
// Oop is in a register; we must store it to the space we reserve
// on the stack for oop_handles and pass a handle if oop is non-NULL
// on the stack for oop_handles and pass a handle if oop is non-null
const Register rOop = src.first()->as_Register();
int oop_slot;
@ -1077,7 +1077,7 @@ void MacroAssembler::object_move(OopMap* map,
int offset = oop_slot*VMRegImpl::stack_slot_size;
map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL
// Store oop in handle area, may be null
movptr(Address(rsp, offset), rOop);
if (is_receiver) {
*receiver_offset = offset;
@ -1085,7 +1085,7 @@ void MacroAssembler::object_move(OopMap* map,
cmpptr(rOop, NULL_WORD);
lea(rHandle, Address(rsp, offset));
// conditionally move a NULL from the handle area where it was just stored
// conditionally move a null from the handle area where it was just stored
cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
}
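
A scalar sketch of the handle-passing convention these moves implement (a free-standing pseudo-helper, not a JVM API): the oop is always spilled to its reserved slot, and the callee receives either the slot's address or a null handle.

// Illustrative only: mirrors "store oop (may be null), then conditionally move
// a null over the just-computed handle" from the sequences above.
static void** handleize_sketch(void** slot, void* oop) {
  *slot = oop;                                // store oop in the handle area, may be null
  return (oop == nullptr) ? nullptr : slot;   // cmpptr + lea + cmovptr(equal, ...)
}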
@ -1344,7 +1344,7 @@ void MacroAssembler::ic_call(address entry, jint method_index) {
void MacroAssembler::emit_static_call_stub() {
// Static stub relocation also tags the Method* in the code-stream.
mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time.
mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code.
jump(RuntimeAddress(pc()));
}
@ -1562,7 +1562,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
assert(last_java_sp != rbp, "can't use ebp/rbp");
// Only interpreter should have to set fp
set_last_Java_frame(java_thread, last_java_sp, rbp, NULL, rscratch1);
set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);
// do the call, remove parameters
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
@ -2854,7 +2854,7 @@ void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratc
void MacroAssembler::null_check(Register reg, int offset) {
if (needs_explicit_null_check(offset)) {
// provoke OS NULL exception if reg = NULL by
// provoke OS null exception if reg is null by
// accessing M[reg] w/o changing any (non-CC) registers
// NOTE: cmpl is plenty here to provoke a segv
cmpptr(rax, Address(reg, 0));
@ -2863,7 +2863,7 @@ void MacroAssembler::null_check(Register reg, int offset) {
// testl needs to be implemented first)
} else {
// nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL
// will provoke OS null exception if reg is null
}
}
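
A minimal sketch of the decision above, assuming a hypothetical helper with the same shape as needs_explicit_null_check() (the name and the single page-size constant are illustrative): offsets that land inside the protected page can rely on the OS fault, anything else needs the explicit cmpptr probe.

#include <stddef.h>
#include <stdint.h>

// Illustrative only.
static bool needs_explicit_null_check_sketch(intptr_t offset, size_t page_size) {
  // An access at M[reg + offset] with 0 <= offset < page_size still hits the
  // unmapped page when reg is null, so the OS signal performs the check.
  return offset < 0 || (size_t)offset >= page_size;
}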
@ -2874,7 +2874,7 @@ void MacroAssembler::os_breakpoint() {
}
void MacroAssembler::unimplemented(const char* what) {
const char* buf = NULL;
const char* buf = nullptr;
{
ResourceMark rm;
stringStream ss;
@ -3105,7 +3105,7 @@ void MacroAssembler::set_last_Java_frame(Register java_thread,
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
}
// last_java_pc is optional
if (last_java_pc != NULL) {
if (last_java_pc != nullptr) {
Address java_pc(java_thread,
JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
lea(java_pc, InternalAddress(last_java_pc), rscratch);
@ -3887,7 +3887,7 @@ void MacroAssembler::resolve_jobject(Register value,
assert_different_registers(value, thread, tmp);
Label done, tagged, weak_tagged;
testptr(value, value);
jcc(Assembler::zero, done); // Use NULL as-is.
jcc(Assembler::zero, done); // Use null as-is.
testptr(value, JNIHandles::tag_mask); // Test for tag.
jcc(Assembler::notZero, tagged);
@ -3921,7 +3921,7 @@ void MacroAssembler::resolve_global_jobject(Register value,
Label done;
testptr(value, value);
jcc(Assembler::zero, done); // Use NULL as-is.
jcc(Assembler::zero, done); // Use null as-is.
#ifdef ASSERT
{
@ -4262,7 +4262,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
}
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
// for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
// if (scan->interface() == intf) {
// result = (klass + scan->offset() + itable_index);
// }
@ -4320,8 +4320,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register temp_reg,
Label& L_success) {
Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
bind(L_failure);
}
@ -4344,10 +4344,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in the batch");
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
@ -4443,9 +4443,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
Label L_fallthrough;
int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch");
if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one null in the batch");
// a couple of useful fields in sub_klass:
int ss_offset = in_bytes(Klass::secondary_supers_offset());
@ -4501,7 +4501,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
if (set_cond_codes) {
// Special hack for the AD files: rdi is guaranteed non-zero.
assert(!pushed_rdi, "rdi must be left non-NULL");
assert(!pushed_rdi, "rdi must be left non-null");
// Also, the condition codes are properly set Z/NZ on succeed/failure.
}
@ -4522,12 +4522,12 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
}
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
Label L_fallthrough;
if (L_fast_path == NULL) {
if (L_fast_path == nullptr) {
L_fast_path = &L_fallthrough;
} else if (L_slow_path == NULL) {
} else if (L_slow_path == nullptr) {
L_slow_path = &L_fallthrough;
}
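
The nullptr-means-fall-through idiom used here (and in the subtype checks above) follows one shape: the caller omits the label it wants to fall through, and the callee redirects that slot to a local L_fallthrough bound at the end. A self-contained sketch with toy stand-ins for MacroAssembler and Label (assumptions, not the real types):

#include <cassert>

struct Label { bool bound = false; };          // toy stand-in
struct AsmSketch {                             // toy stand-in for MacroAssembler
  void bind(Label& l) { l.bound = true; }
  void emit_with_optional_paths(Label* L_fast_path, Label* L_slow_path) {
    assert(L_fast_path != nullptr || L_slow_path != nullptr); // at least one required
    Label L_fallthrough;
    if (L_fast_path == nullptr)      L_fast_path = &L_fallthrough;
    else if (L_slow_path == nullptr) L_slow_path = &L_fallthrough;
    // ... branches to *L_fast_path / *L_slow_path would be emitted here ...
    bind(L_fallthrough);                       // the omitted outcome simply continues
  }
};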
@ -4581,7 +4581,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
push(reg); // pass register argument
// Pass register number to verify_oop_subroutine
const char* b = NULL;
const char* b = nullptr;
{
ResourceMark rm;
stringStream ss;
@ -4651,7 +4651,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
}
// Pass register number to verify_oop_subroutine
const char* b = NULL;
const char* b = nullptr;
{
ResourceMark rm;
stringStream ss;
@ -4720,7 +4720,7 @@ class ControlWord {
case 2: rc = "round up "; break;
case 3: rc = "chop "; break;
default:
rc = NULL; // silence compiler warnings
rc = nullptr; // silence compiler warnings
fatal("Unknown rounding control: %d", rounding_control());
};
// precision control
@ -4731,7 +4731,7 @@ class ControlWord {
case 2: pc = "53 bits "; break;
case 3: pc = "64 bits "; break;
default:
pc = NULL; // silence compiler warnings
pc = nullptr; // silence compiler warnings
fatal("Unknown precision control: %d", precision_control());
};
// flags
@ -4853,7 +4853,7 @@ class FPU_State {
case 3: return "empty";
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
void print() const {
@ -5189,7 +5189,7 @@ void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
}
// Used for storing NULLs.
// Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) {
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
}
@ -5205,7 +5205,7 @@ void MacroAssembler::store_klass_gap(Register dst, Register src) {
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
assert (UseCompressedOops, "should be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (Universe::heap() != nullptr, "java heap should be initialized");
if (CheckCompressedOops) {
Label ok;
ExternalAddress src2(CompressedOops::ptrs_base_addr());
@ -5230,7 +5230,7 @@ void MacroAssembler::encode_heap_oop(Register r) {
verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
verify_oop_msg(r, "broken oop in encode_heap_oop");
if (CompressedOops::base() == NULL) {
if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shrq(r, LogMinObjAlignmentInBytes);
@ -5255,7 +5255,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register r) {
}
#endif
verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
if (CompressedOops::base() != NULL) {
if (CompressedOops::base() != nullptr) {
subq(r, r12_heapbase);
}
if (CompressedOops::shift() != 0) {
@ -5279,7 +5279,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
if (dst != src) {
movq(dst, src);
}
if (CompressedOops::base() != NULL) {
if (CompressedOops::base() != nullptr) {
subq(dst, r12_heapbase);
}
if (CompressedOops::shift() != 0) {
@ -5292,7 +5292,7 @@ void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT
verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
if (CompressedOops::base() == NULL) {
if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes);
@ -5310,25 +5310,25 @@ void MacroAssembler::decode_heap_oop(Register r) {
void MacroAssembler::decode_heap_oop_not_null(Register r) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (CompressedOops::shift() != 0) {
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) {
if (CompressedOops::base() != nullptr) {
addq(r, r12_heapbase);
}
} else {
assert (CompressedOops::base() == NULL, "sanity");
assert (CompressedOops::base() == nullptr, "sanity");
}
}
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
@ -5341,12 +5341,12 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
movq(dst, src);
}
shlq(dst, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) {
if (CompressedOops::base() != nullptr) {
addq(dst, r12_heapbase);
}
}
} else {
assert (CompressedOops::base() == NULL, "sanity");
assert (CompressedOops::base() == nullptr, "sanity");
if (dst != src) {
movq(dst, src);
}
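
The arithmetic behind these encode/decode sequences is plain base-plus-shift compression; a free-standing sketch where base and shift stand for CompressedOops::base()/shift() (when the base is null the add/sub is skipped, exactly as in the emitted code):

#include <stdint.h>

// Illustrative only: scalar form of the compressed-oop encode/decode above.
static uint32_t encode_oop_sketch(uintptr_t oop, uintptr_t base, int shift) {
  return (uint32_t)((oop - base) >> shift);     // subq r12_heapbase; shrq LogMinObjAlignmentInBytes
}
static uintptr_t decode_oop_sketch(uint32_t narrow, uintptr_t base, int shift) {
  return base + ((uintptr_t)narrow << shift);   // shlq LogMinObjAlignmentInBytes; addq r12_heapbase
}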
@ -5355,7 +5355,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
assert_different_registers(r, tmp);
if (CompressedKlassPointers::base() != NULL) {
if (CompressedKlassPointers::base() != nullptr) {
mov64(tmp, (int64_t)CompressedKlassPointers::base());
subq(r, tmp);
}
@ -5367,7 +5367,7 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
assert_different_registers(src, dst);
if (CompressedKlassPointers::base() != NULL) {
if (CompressedKlassPointers::base() != nullptr) {
mov64(dst, -(int64_t)CompressedKlassPointers::base());
addq(dst, src);
} else {
@ -5390,7 +5390,7 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
shlq(r, LogKlassAlignmentInBytes);
}
if (CompressedKlassPointers::base() != NULL) {
if (CompressedKlassPointers::base() != nullptr) {
mov64(tmp, (int64_t)CompressedKlassPointers::base());
addq(r, tmp);
}
@ -5404,13 +5404,13 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (CompressedKlassPointers::base() == NULL &&
if (CompressedKlassPointers::base() == nullptr &&
CompressedKlassPointers::shift() == 0) {
// The best case scenario is that there is no base or shift. Then it is already
// a pointer that needs nothing but a register rename.
movl(dst, src);
} else {
if (CompressedKlassPointers::base() != NULL) {
if (CompressedKlassPointers::base() != nullptr) {
mov64(dst, (int64_t)CompressedKlassPointers::base());
} else {
xorq(dst, dst);
@ -5427,8 +5427,8 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src)
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
mov_narrow_oop(dst, oop_index, rspec);
@ -5436,8 +5436,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
mov_narrow_oop(dst, oop_index, rspec);
@ -5445,7 +5445,7 @@ void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5453,7 +5453,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5461,8 +5461,8 @@ void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
Assembler::cmp_narrow_oop(dst, oop_index, rspec);
@ -5470,8 +5470,8 @@ void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
Assembler::cmp_narrow_oop(dst, oop_index, rspec);
@ -5479,7 +5479,7 @@ void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5487,7 +5487,7 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5495,8 +5495,8 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
if (Universe::heap() != NULL) {
if (CompressedOops::base() == NULL) {
if (Universe::heap() != nullptr) {
if (CompressedOops::base() == nullptr) {
MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
} else {
mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());

@ -91,9 +91,9 @@ class MacroAssembler: public Assembler {
Address as_Address(AddressLiteral adr);
Address as_Address(ArrayAddress adr, Register rscratch);
// Support for NULL-checks
// Support for null-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
@ -119,7 +119,7 @@ class MacroAssembler: public Assembler {
char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
file == NULL ? "<NULL>" : file, line);
file == nullptr ? "<null>" : file, line);
*disp = imm8;
} else {
int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
@ -377,7 +377,7 @@ class MacroAssembler: public Assembler {
void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
// Used for storing NULL. All other oop constants should be
// Used for storing null. All other oop constants should be
// stored using routines that take a jobject.
void store_heap_oop_null(Address dst);
@ -385,7 +385,7 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register dst, Register src);
// This dummy is to prevent a call to store_heap_oop from
// converting a zero (like NULL) into a Register by giving
// converting a zero (like null) into a Register by giving
// the compiler two choices it can't resolve
void store_heap_oop(Address dst, void* dummy);
@ -610,7 +610,7 @@ public:
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg.
void check_klass_subtype_fast_path(Register sub_klass,
@ -643,8 +643,8 @@ public:
void clinit_barrier(Register klass,
Register thread,
Label* L_fast_path = NULL,
Label* L_slow_path = NULL);
Label* L_fast_path = nullptr,
Label* L_slow_path = nullptr);
// method handles (JSR 292)
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -231,14 +231,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument.
__ hlt(); // empty stubs make SG sick
return NULL;
return nullptr;
}
// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ hlt();
return NULL;
return nullptr;
}
// rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
@ -521,8 +521,8 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* saved_regs,
intptr_t* entry_sp) {
// called as a leaf from native code: do not block the JVM!
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
bool has_mh = (strstr(adaptername, "/static") == nullptr &&
strstr(adaptername, "linkTo") == nullptr); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
log_info(methodhandles)("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT, adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));
@ -584,7 +584,7 @@ void trace_method_handle_stub(const char* adaptername,
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = cur_frame;
while (trace_calling_frame.fp() < saved_regs) {
assert(trace_calling_frame.cb() == NULL, "not a C frame");
assert(trace_calling_frame.cb() == nullptr, "not a C frame");
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
assert(trace_calling_frame.sp() < saved_regs, "wrong frame");

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -220,7 +220,7 @@ void NativeCall::insert(address code_pos, address entry) {
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
assert (instr_addr != NULL, "illegal address for code patching");
assert (instr_addr != nullptr, "illegal address for code patching");
NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
@ -616,7 +616,7 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
// (spinlock). Then patches the last byte, and then atomically replaces
// the jmp's with the first 4 byte of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
assert (instr_addr != NULL, "illegal address for code patching (4)");
assert (instr_addr != nullptr, "illegal address for code patching (4)");
NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
// Temporary code

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -745,7 +745,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) {
return nop;
}
return NULL;
return nullptr;
}
inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,13 +35,13 @@ address RegisterMap::pd_location(VMReg reg) const {
intptr_t offset_in_bytes = (reg->value() - base_reg->value()) * VMRegImpl::stack_slot_size;
if (base_reg_enc > 15) {
if (offset_in_bytes == 0) {
return NULL; // ZMM16-31 are stored in full.
return nullptr; // ZMM16-31 are stored in full.
}
} else {
if (offset_in_bytes == 0 || offset_in_bytes == 16 || offset_in_bytes == 32) {
// Reads of the low and high 16 byte parts should be handled by location itself because
// they have separate callee saved entries (see RegisterSaver::save_live_registers()).
return NULL;
return nullptr;
}
// The upper part of YMM0-15 and ZMM0-15 registers are saved separately in the frame.
if (offset_in_bytes > 32) {
@ -55,11 +55,11 @@ address RegisterMap::pd_location(VMReg reg) const {
}
}
address base_location = location(base_reg, nullptr);
if (base_location != NULL) {
if (base_location != nullptr) {
return base_location + offset_in_bytes;
}
}
return NULL;
return nullptr;
}
address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,7 +88,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
address Relocation::pd_call_destination(address orig_addr) {
intptr_t adj = 0;
if (orig_addr != NULL) {
if (orig_addr != nullptr) {
// We just moved this call instruction from orig_addr to addr().
// This means its target will appear to have grown by addr() - orig_addr.
adj = -( addr() - orig_addr );
@ -104,7 +104,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return (address) ((NativeMovConstReg*)ni)->data();
} else {
ShouldNotReachHere();
return NULL;
return nullptr;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,7 +109,7 @@ void OptoRuntime::generate_exception_blob() {
// registers of the frame being removed.
//
__ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
__ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg);
__ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

@ -745,22 +745,22 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
(Interpreter::code() != nullptr || StubRoutines::code1() != nullptr)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
if (Interpreter::code() != nullptr)
range_check(masm, rax, rdi,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
if (StubRoutines::code1() != nullptr)
range_check(masm, rax, rdi,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
if (StubRoutines::code2() != nullptr)
range_check(masm, rax, rdi,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
@ -975,7 +975,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on x86");
assert(regs2 == nullptr, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
@ -1327,10 +1327,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
(OopMapSet*)nullptr);
}
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
@ -1346,7 +1346,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
BasicType* in_elem_bt = nullptr;
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
@ -1361,7 +1361,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space
// they require.
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in
// registers a max of 2 on x86.
@ -1487,7 +1487,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */);
bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
@ -2073,7 +2073,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 1536, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words;
OopMap* map = NULL;
OopMap* map = nullptr;
// Account for the extra args we place on the stack
// by the time we call fetch_unroll_info
const int additional_words = 2; // deopt kind, thread
@ -2202,7 +2202,7 @@ void SharedRuntime::generate_deopt_blob() {
__ get_thread(rcx);
__ push(rcx);
// fetch_unroll_info needs to call last_java_frame()
__ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg);
__ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
@ -2350,7 +2350,7 @@ void SharedRuntime::generate_deopt_blob() {
__ push(rcx);
// set last_Java_sp, last_Java_fp
__ set_last_Java_frame(rcx, noreg, rbp, NULL, noreg);
__ set_last_Java_frame(rcx, noreg, rbp, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
@ -2447,7 +2447,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// set last_Java_sp
__ get_thread(rdx);
__ set_last_Java_frame(rdx, noreg, noreg, NULL, noreg);
__ set_last_Java_frame(rdx, noreg, noreg, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
@ -2559,7 +2559,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// set last_Java_sp, last_Java_fp
__ get_thread(rdi);
__ set_last_Java_frame(rdi, noreg, rbp, NULL, noreg);
__ set_last_Java_frame(rdi, noreg, rbp, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
@ -2599,7 +2599,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
const int additional_words = 1;
int frame_size_in_words;
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
ResourceMark rm;
OopMapSet *oop_maps = new OopMapSet();
@ -2612,7 +2612,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
const Register java_thread = rdi; // callee-saved for VC++
address start = __ pc();
address call_pc = NULL;
address call_pc = nullptr;
bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
@ -2641,7 +2641,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// Push thread argument and setup last_Java_sp
__ get_thread(java_thread);
__ push(java_thread);
__ set_last_Java_frame(java_thread, noreg, noreg, NULL, noreg);
__ set_last_Java_frame(java_thread, noreg, noreg, nullptr, noreg);
// if this was not a poll_return then we need to correct the return address now.
if (!cause_return) {
@ -2754,7 +2754,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code
ResourceMark rm;
@ -2768,7 +2768,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
extra_words };
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL;
OopMap* map = nullptr;
int start = __ offset();
@ -2780,7 +2780,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ get_thread(rdi);
__ push(thread);
__ set_last_Java_frame(thread, noreg, rbp, NULL, noreg);
__ set_last_Java_frame(thread, noreg, rbp, nullptr, noreg);
__ call(RuntimeAddress(destination));

@ -796,7 +796,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// caller, but with an uncorrected stack, causing delayed havoc.
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
(Interpreter::code() != nullptr || StubRoutines::code1() != nullptr)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
@ -805,15 +805,15 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// Pick up the return address
__ movptr(rax, Address(rsp, 0));
Label L_ok;
if (Interpreter::code() != NULL)
if (Interpreter::code() != nullptr)
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
if (StubRoutines::code1() != nullptr)
range_check(masm, rax, r11,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
if (StubRoutines::code2() != nullptr)
range_check(masm, rax, r11,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
@ -1014,7 +1014,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc();
// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL;
address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
Register method = rbx;
@ -1048,7 +1048,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on x86");
assert(regs2 == nullptr, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
@ -1761,10 +1761,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
nullptr);
}
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
@ -1781,7 +1781,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
BasicType* in_elem_bt = nullptr;
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
@ -1796,7 +1796,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space
// they require.
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers
@ -1926,7 +1926,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
// native wrapper is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */);
bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
@ -2524,7 +2524,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words;
OopMap* map = NULL;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
// -------------
@ -2602,7 +2602,7 @@ void SharedRuntime::generate_deopt_blob() {
// Save everything in sight.
RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
// fetch_unroll_info needs to call last_java_frame()
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1);
__ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
__ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
__ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
@ -2684,7 +2684,7 @@ void SharedRuntime::generate_deopt_blob() {
// fetch_unroll_info needs to call last_java_frame().
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1);
__ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
@ -2717,7 +2717,7 @@ void SharedRuntime::generate_deopt_blob() {
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
__ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
// QQQ this is useless it was NULL above
// QQQ this is useless it was null above
__ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
__ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
@ -2903,7 +2903,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// runtime expects it.
__ movl(c_rarg1, j_rarg0);
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1);
__ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
@ -3061,7 +3061,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// and setup oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
assert(StubRoutines::forward_exception_entry() != NULL,
assert(StubRoutines::forward_exception_entry() != nullptr,
"must be generated before");
ResourceMark rm;
@ -3073,7 +3073,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
MacroAssembler* masm = new MacroAssembler(&buffer);
address start = __ pc();
address call_pc = NULL;
address call_pc = nullptr;
int frame_size_in_words;
bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_wide_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
@ -3097,7 +3097,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// address of the call in order to generate an oopmap. Hence, we do all the
// work ourselves.
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
__ set_last_Java_frame(noreg, noreg, nullptr, rscratch1); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
// The return address must always be correct so that frame constructor never
// sees an invalid pc.
@ -3227,7 +3227,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code
ResourceMark rm;
@ -3238,7 +3238,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
int frame_size_in_words;
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL;
OopMap* map = nullptr;
int start = __ offset();
@ -3247,7 +3247,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
int frame_complete = __ offset();
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1);
__ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
__ mov(c_rarg0, r15_thread);

@ -364,7 +364,7 @@ class StubGenerator: public StubCodeGenerator {
ExternalAddress((address)__FILE__), noreg);
__ movl(Address(rcx, Thread::exception_line_offset()), __LINE__ );
// complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
assert(StubRoutines::_call_stub_return_address != nullptr, "_call_stub_return_address must have been generated before");
__ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
return start;
@ -970,7 +970,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ movptr(rax, Address(rsp, 4 * wordSize)); // get object
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is ok
__ jcc(Assembler::zero, exit); // if obj is null it is ok
// Check if the oop is in the right area of memory
const int oop_mask = Universe::verify_oop_mask();
@ -983,7 +983,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure klass is 'reasonable', which is not zero.
__ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
__ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
__ jcc(Assembler::zero, error); // if klass is null it is broken
// return if everything seems ok
__ bind(exit);
@ -1109,7 +1109,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(to , Address(rsp, 12+ 8));
__ movl(count, Address(rsp, 12+ 12));
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc(); // Entry point from conjoint arraycopy stub.
BLOCK_COMMENT("Entry:");
}
@ -1286,7 +1286,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(dst , Address(rsp, 12+ 8)); // to
__ movl2ptr(count, Address(rsp, 12+12)); // count
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:");
}
@ -1544,13 +1544,13 @@ class StubGenerator: public StubCodeGenerator {
Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr) \
if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \
if (label_ptr != nullptr) __ jcc(assembler_con, *(label_ptr)); \
else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
// The following is a strange variation of the fast path which requires
// one less register, because needed values are on the argument stack.
// __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
// L_success, L_failure, NULL);
// L_success, L_failure, null);
assert_different_registers(sub_klass, temp);
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
@ -1579,8 +1579,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(L_fallthrough);
if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }
if (L_success == nullptr) { BLOCK_COMMENT("L_success:"); }
if (L_failure == nullptr) { BLOCK_COMMENT("L_failure:"); }
#undef LOCAL_JCC
}
@ -1634,7 +1634,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(to, to_arg);
__ movl2ptr(length, length_arg);
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:");
}
@ -1702,7 +1702,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(elem_klass, elem_klass_addr); // query the object klass
generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
&L_store_element, NULL);
&L_store_element, nullptr);
// (On fall-through, we have failed the element type check.)
// ======== end loop ========
@ -1909,7 +1909,7 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
@ -1921,7 +1921,7 @@ class StubGenerator: public StubCodeGenerator {
const Register dst_pos = rdi;
const Register length = rcx; // transfer count
// if (src == NULL) return -1;
// if (src == null) return -1;
__ movptr(src, SRC); // src oop
__ testptr(src, src);
__ jccb(Assembler::zero, L_failed_0);
@ -1931,7 +1931,7 @@ class StubGenerator: public StubCodeGenerator {
__ testl(src_pos, src_pos);
__ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1;
// if (dst == nullptr) return -1;
__ movptr(dst, DST); // dst oop
__ testptr(dst, dst);
__ jccb(Assembler::zero, L_failed_0);
@ -1946,18 +1946,18 @@ class StubGenerator: public StubCodeGenerator {
__ testl(length, length);
__ jccb(Assembler::negative, L_failed_0);
// if (src->klass() == NULL) return -1;
// if (src->klass() == nullptr) return -1;
Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
const Register rcx_src_klass = rcx; // array klass
__ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
#ifdef ASSERT
// assert(src->klass() != NULL);
// assert(src->klass() != nullptr);
BLOCK_COMMENT("assert klasses not null");
{ Label L1, L2;
__ testptr(rcx_src_klass, rcx_src_klass);
__ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
__ jccb(Assembler::notZero, L2); // it is broken if klass is null
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
@ -2130,7 +2130,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_fail_array_check;
generate_type_check(rbx_src_klass,
super_check_offset_addr, dst_klass_addr,
rdi_temp, NULL, &L_fail_array_check);
rdi_temp, nullptr, &L_fail_array_check);
// (On fall-through, we have passed the array type check.)
__ pop(rbx);
__ jmp(L_plain_copy);
@ -2194,7 +2194,7 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy =
generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
NULL, "arrayof_jbyte_arraycopy");
nullptr, "arrayof_jbyte_arraycopy");
StubRoutines::_jbyte_disjoint_arraycopy =
generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
"jbyte_disjoint_arraycopy");
@ -2207,7 +2207,7 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy =
generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
NULL, "arrayof_jshort_arraycopy");
nullptr, "arrayof_jshort_arraycopy");
StubRoutines::_jshort_disjoint_arraycopy =
generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
"jshort_disjoint_arraycopy");
@ -2236,7 +2236,7 @@ class StubGenerator: public StubCodeGenerator {
/*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit =
generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
NULL, "oop_arraycopy_uninit",
nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_jlong_disjoint_arraycopy =
@ -2265,7 +2265,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_checkcast_arraycopy =
generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit =
generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true);
generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, /*dest_uninitialized*/true);
StubRoutines::_unsafe_arraycopy =
generate_unsafe_copy("unsafe_arraycopy",
@ -3924,7 +3924,7 @@ class StubGenerator: public StubCodeGenerator {
}
// Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, NULL, noreg);
__ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg);
// Call runtime
BLOCK_COMMENT("call runtime_entry");
@ -4231,7 +4231,7 @@ class StubGenerator: public StubCodeGenerator {
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
}
}
@ -4251,7 +4251,7 @@ class StubGenerator: public StubCodeGenerator {
#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, int phase) {
if (UnsafeCopyMemory::_table == NULL) {
if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, phase);

@ -459,7 +459,7 @@ address StubGenerator::generate_catch_exception() {
__ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
// complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL,
assert(StubRoutines::_call_stub_return_address != nullptr,
"_call_stub_return_address must have been generated before");
__ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
@ -1091,7 +1091,7 @@ address StubGenerator::generate_verify_oop() {
// make sure object is 'reasonable'
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
__ jcc(Assembler::zero, exit); // if obj is null it is OK
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error);
@ -4085,7 +4085,7 @@ void StubGenerator::generate_all() {
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
}
#ifdef COMPILER2
@ -4112,13 +4112,13 @@ void StubGenerator::generate_all() {
}
// Get svml stub routine addresses
void *libjsvml = NULL;
void *libjsvml = nullptr;
char ebuf[1024];
char dll_name[JVM_MAXPATHLEN];
if (os::dll_locate_lib(dll_name, sizeof(dll_name), Arguments::get_dll_dir(), "jsvml")) {
libjsvml = os::dll_load(dll_name, ebuf, sizeof ebuf);
}
if (libjsvml != NULL) {
if (libjsvml != nullptr) {
// SVML method naming convention
// All the methods are named as __jsvml_op<T><N>_ha_<VV>
// Where:
@ -4182,7 +4182,7 @@ void StubGenerator::generate_all() {
}
void StubGenerator_generate(CodeBuffer* code, int phase) {
if (UnsafeCopyMemory::_table == NULL) {
if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(16);
}
StubGenerator g(code, phase);

@ -122,11 +122,11 @@ class StubGenerator: public StubCodeGenerator {
void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf);
void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
assert(no_overlap_target != NULL, "must be generated");
array_overlap_test(no_overlap_target, NULL, sf);
assert(no_overlap_target != nullptr, "must be generated");
array_overlap_test(no_overlap_target, nullptr, sf);
}
void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
array_overlap_test(NULL, &L_no_overlap, sf);
array_overlap_test(nullptr, &L_no_overlap, sf);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -113,7 +113,7 @@ void StubGenerator::generate_arraycopy_stubs() {
"oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
NULL, "oop_arraycopy_uninit",
nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true);
} else {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
@ -124,12 +124,12 @@ void StubGenerator::generate_arraycopy_stubs() {
"oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
NULL, "oop_arraycopy_uninit",
nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true);
}
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true);
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
@ -212,7 +212,7 @@ void StubGenerator::array_overlap_test(address no_overlap_target, Label* NOLp, A
__ cmpptr(to, from);
__ lea(end_from, Address(from, count, sf, 0));
if (NOLp == NULL) {
if (NOLp == nullptr) {
ExternalAddress no_overlap(no_overlap_target);
__ jump_cc(Assembler::belowEqual, no_overlap);
__ cmpptr(to, end_from);
@ -530,7 +530,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(address* entry, const
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -752,7 +752,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(address* entry, const
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1152,7 +1152,7 @@ address StubGenerator::generate_disjoint_byte_copy(bool aligned, address* entry,
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1265,7 +1265,7 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1383,7 +1383,7 @@ address StubGenerator::generate_disjoint_short_copy(bool aligned, address *entry
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1514,7 +1514,7 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1625,7 +1625,7 @@ address StubGenerator::generate_disjoint_int_oop_copy(bool aligned, bool is_oop,
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1732,7 +1732,7 @@ address StubGenerator::generate_conjoint_int_oop_copy(bool aligned, bool is_oop,
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1850,7 +1850,7 @@ address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop
// Save no-overlap entry point for generate_conjoint_long_oop_copy()
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -1958,7 +1958,7 @@ address StubGenerator::generate_conjoint_long_oop_copy(bool aligned, bool is_oop
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
@ -2040,9 +2040,9 @@ void StubGenerator::generate_type_check(Register sub_klass,
Label L_miss;
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr,
super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr);
// Fall through on failure!
__ BIND(L_miss);
@ -2119,7 +2119,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry,
#endif
// Caller of this entry point must set up the argument registers.
if (entry != NULL) {
if (entry != nullptr) {
*entry = __ pc();
BLOCK_COMMENT("Entry:");
}
@ -2426,13 +2426,13 @@ address StubGenerator::generate_generic_copy(const char *name,
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
//
// if (src == NULL) return -1;
// if (src == nullptr) return -1;
__ testptr(src, src); // src oop
size_t j1off = __ offset();
__ jccb(Assembler::zero, L_failed_0);
@ -2441,7 +2441,7 @@ address StubGenerator::generate_generic_copy(const char *name,
__ testl(src_pos, src_pos); // src_pos (32-bits)
__ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1;
// if (dst == nullptr) return -1;
__ testptr(dst, dst); // dst oop
__ jccb(Assembler::zero, L_failed_0);
@ -2469,12 +2469,12 @@ address StubGenerator::generate_generic_copy(const char *name,
__ load_klass(r10_src_klass, src, rklass_tmp);
#ifdef ASSERT
// assert(src->klass() != NULL);
// assert(src->klass() != nullptr);
{
BLOCK_COMMENT("assert klasses not null {");
Label L1, L2;
__ testptr(r10_src_klass, r10_src_klass);
__ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
__ jcc(Assembler::notZero, L2); // it is broken if klass is null
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);

@ -33,57 +33,57 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::x86::_upper_word_mask_addr = NULL;
address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
address StubRoutines::x86::_k256_adr = NULL;
address StubRoutines::x86::_vector_short_to_byte_mask = NULL;
address StubRoutines::x86::_vector_int_to_byte_mask = NULL;
address StubRoutines::x86::_vector_int_to_short_mask = NULL;
address StubRoutines::x86::_vector_all_bits_set = NULL;
address StubRoutines::x86::_vector_byte_shuffle_mask = NULL;
address StubRoutines::x86::_vector_int_mask_cmp_bits = NULL;
address StubRoutines::x86::_vector_short_shuffle_mask = NULL;
address StubRoutines::x86::_vector_int_shuffle_mask = NULL;
address StubRoutines::x86::_vector_long_shuffle_mask = NULL;
address StubRoutines::x86::_vector_float_sign_mask = NULL;
address StubRoutines::x86::_vector_float_sign_flip = NULL;
address StubRoutines::x86::_vector_double_sign_mask = NULL;
address StubRoutines::x86::_vector_double_sign_flip = NULL;
address StubRoutines::x86::_vector_byte_perm_mask = NULL;
address StubRoutines::x86::_vector_long_sign_mask = NULL;
address StubRoutines::x86::_vector_iota_indices = NULL;
address StubRoutines::x86::_vector_reverse_bit_lut = NULL;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_long = NULL;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_int = NULL;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_short = NULL;
address StubRoutines::x86::_vector_popcount_lut = NULL;
address StubRoutines::x86::_vector_count_leading_zeros_lut = NULL;
address StubRoutines::x86::_vector_32_bit_mask = NULL;
address StubRoutines::x86::_vector_64_bit_mask = NULL;
address StubRoutines::x86::_verify_mxcsr_entry = nullptr;
address StubRoutines::x86::_upper_word_mask_addr = nullptr;
address StubRoutines::x86::_shuffle_byte_flip_mask_addr = nullptr;
address StubRoutines::x86::_k256_adr = nullptr;
address StubRoutines::x86::_vector_short_to_byte_mask = nullptr;
address StubRoutines::x86::_vector_int_to_byte_mask = nullptr;
address StubRoutines::x86::_vector_int_to_short_mask = nullptr;
address StubRoutines::x86::_vector_all_bits_set = nullptr;
address StubRoutines::x86::_vector_byte_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_int_mask_cmp_bits = nullptr;
address StubRoutines::x86::_vector_short_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_int_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_long_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_float_sign_mask = nullptr;
address StubRoutines::x86::_vector_float_sign_flip = nullptr;
address StubRoutines::x86::_vector_double_sign_mask = nullptr;
address StubRoutines::x86::_vector_double_sign_flip = nullptr;
address StubRoutines::x86::_vector_byte_perm_mask = nullptr;
address StubRoutines::x86::_vector_long_sign_mask = nullptr;
address StubRoutines::x86::_vector_iota_indices = nullptr;
address StubRoutines::x86::_vector_reverse_bit_lut = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_long = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_int = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_short = nullptr;
address StubRoutines::x86::_vector_popcount_lut = nullptr;
address StubRoutines::x86::_vector_count_leading_zeros_lut = nullptr;
address StubRoutines::x86::_vector_32_bit_mask = nullptr;
address StubRoutines::x86::_vector_64_bit_mask = nullptr;
#ifdef _LP64
address StubRoutines::x86::_k256_W_adr = NULL;
address StubRoutines::x86::_k512_W_addr = NULL;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = NULL;
address StubRoutines::x86::_k256_W_adr = nullptr;
address StubRoutines::x86::_k512_W_addr = nullptr;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = nullptr;
// Base64 masks
address StubRoutines::x86::_encoding_table_base64 = NULL;
address StubRoutines::x86::_shuffle_base64 = NULL;
address StubRoutines::x86::_avx2_shuffle_base64 = NULL;
address StubRoutines::x86::_avx2_input_mask_base64 = NULL;
address StubRoutines::x86::_avx2_lut_base64 = NULL;
address StubRoutines::x86::_avx2_decode_tables_base64 = NULL;
address StubRoutines::x86::_avx2_decode_lut_tables_base64 = NULL;
address StubRoutines::x86::_lookup_lo_base64 = NULL;
address StubRoutines::x86::_lookup_hi_base64 = NULL;
address StubRoutines::x86::_lookup_lo_base64url = NULL;
address StubRoutines::x86::_lookup_hi_base64url = NULL;
address StubRoutines::x86::_pack_vec_base64 = NULL;
address StubRoutines::x86::_join_0_1_base64 = NULL;
address StubRoutines::x86::_join_1_2_base64 = NULL;
address StubRoutines::x86::_join_2_3_base64 = NULL;
address StubRoutines::x86::_decoding_table_base64 = NULL;
address StubRoutines::x86::_encoding_table_base64 = nullptr;
address StubRoutines::x86::_shuffle_base64 = nullptr;
address StubRoutines::x86::_avx2_shuffle_base64 = nullptr;
address StubRoutines::x86::_avx2_input_mask_base64 = nullptr;
address StubRoutines::x86::_avx2_lut_base64 = nullptr;
address StubRoutines::x86::_avx2_decode_tables_base64 = nullptr;
address StubRoutines::x86::_avx2_decode_lut_tables_base64 = nullptr;
address StubRoutines::x86::_lookup_lo_base64 = nullptr;
address StubRoutines::x86::_lookup_hi_base64 = nullptr;
address StubRoutines::x86::_lookup_lo_base64url = nullptr;
address StubRoutines::x86::_lookup_hi_base64url = nullptr;
address StubRoutines::x86::_pack_vec_base64 = nullptr;
address StubRoutines::x86::_join_0_1_base64 = nullptr;
address StubRoutines::x86::_join_1_2_base64 = nullptr;
address StubRoutines::x86::_join_2_3_base64 = nullptr;
address StubRoutines::x86::_decoding_table_base64 = nullptr;
#endif
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = nullptr;
uint64_t StubRoutines::x86::_crc_by128_masks[] =
{

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
address StubRoutines::x86::_method_entry_barrier = NULL;
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = nullptr;
address StubRoutines::x86::_method_entry_barrier = nullptr;
address StubRoutines::x86::_d2i_wrapper = NULL;
address StubRoutines::x86::_d2l_wrapper = NULL;
address StubRoutines::x86::_d2i_wrapper = nullptr;
address StubRoutines::x86::_d2l_wrapper = nullptr;
jint StubRoutines::x86::_fpu_cntrl_wrd_std = 0;
jint StubRoutines::x86::_fpu_cntrl_wrd_24 = 0;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,15 +33,15 @@
jint StubRoutines::x86::_mxcsr_std = 0;
address StubRoutines::x86::_get_previous_sp_entry = NULL;
address StubRoutines::x86::_get_previous_sp_entry = nullptr;
address StubRoutines::x86::_f2i_fixup = NULL;
address StubRoutines::x86::_f2l_fixup = NULL;
address StubRoutines::x86::_d2i_fixup = NULL;
address StubRoutines::x86::_d2l_fixup = NULL;
address StubRoutines::x86::_float_sign_mask = NULL;
address StubRoutines::x86::_float_sign_flip = NULL;
address StubRoutines::x86::_double_sign_mask = NULL;
address StubRoutines::x86::_double_sign_flip = NULL;
address StubRoutines::x86::_method_entry_barrier = NULL;
address StubRoutines::x86::_f2i_fixup = nullptr;
address StubRoutines::x86::_f2l_fixup = nullptr;
address StubRoutines::x86::_d2i_fixup = nullptr;
address StubRoutines::x86::_d2l_fixup = nullptr;
address StubRoutines::x86::_float_sign_mask = nullptr;
address StubRoutines::x86::_float_sign_flip = nullptr;
address StubRoutines::x86::_double_sign_mask = nullptr;
address StubRoutines::x86::_double_sign_flip = nullptr;
address StubRoutines::x86::_method_entry_barrier = nullptr;

@ -146,7 +146,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
@ -206,7 +206,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Restore stack bottom in case i2c adjusted stack
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call
// and null it as marker that esp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp();
@ -254,7 +254,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
}
#endif // _LP64
// NULL last_sp until next java call
// null last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp();
__ restore_locals();
@ -297,7 +297,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ should_not_reach_here();
__ bind(L);
}
if (continuation == NULL) {
if (continuation == nullptr) {
__ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
@ -434,8 +434,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch
// (NULL bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or NULL if the
// (null bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or null if the
// compilation did not complete (either went background or bailed
// out).
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
@ -535,7 +535,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check_pop);
@ -589,7 +589,7 @@ void TemplateInterpreterGenerator::lock_method() {
Label L;
__ testptr(rax, rax);
__ jcc(Assembler::notZero, L);
__ stop("synchronization object is NULL");
__ stop("synchronization object is null");
__ bind(L);
}
#endif // ASSERT
@ -687,7 +687,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
Label slow_path;
// rbx: method
// Check if local 0 != NULL
// Check if local 0 != null
// If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize));
@ -1302,7 +1302,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry
// pop return address, reset last_sp to NULL
// pop return address, reset last_sp to null
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
@ -1650,7 +1650,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ jcc(Assembler::notEqual, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
__ get_method(rdx);
__ movptr(rax, Address(local0, 0));
@ -1839,7 +1839,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The run-time runtime saves the right registers, depending on
// the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL,
assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated");
#ifndef _LP64
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));

@ -225,7 +225,7 @@ address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() {
return entry;
}
return NULL;
return nullptr;
}
/**
@@ -251,7 +251,7 @@ address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
return entry;
}
return NULL;
return nullptr;
}
@@ -278,7 +278,7 @@ address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() {
return entry;
}
return NULL;
return nullptr;
}
/**
@@ -305,7 +305,7 @@ address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry(
return entry;
}
return NULL;
return nullptr;
}
/**
@@ -375,7 +375,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
//
if (kind == Interpreter::java_lang_math_fmaD) {
if (!UseFMA) {
return NULL; // Generate a vanilla entry
return nullptr; // Generate a vanilla entry
}
__ movdbl(xmm2, Address(rsp, 5 * wordSize));
__ movdbl(xmm1, Address(rsp, 3 * wordSize));
@@ -388,7 +388,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
return entry_point;
} else if (kind == Interpreter::java_lang_math_fmaF) {
if (!UseFMA) {
return NULL; // Generate a vanilla entry
return nullptr; // Generate a vanilla entry
}
__ movflt(xmm2, Address(rsp, 3 * wordSize));
__ movflt(xmm1, Address(rsp, 2 * wordSize));
@@ -406,7 +406,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_sin :
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
if (VM_Version::supports_sse2() && StubRoutines::dsin() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin));
@@ -416,7 +416,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_cos :
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
if (VM_Version::supports_sse2() && StubRoutines::dcos() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos));
@@ -426,7 +426,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_tan :
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (StubRoutines::dtan() != NULL) {
if (StubRoutines::dtan() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
@@ -442,7 +442,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_log:
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (StubRoutines::dlog() != NULL) {
if (StubRoutines::dlog() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog));
@@ -452,7 +452,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_log10:
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (StubRoutines::dlog10() != NULL) {
if (StubRoutines::dlog10() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10));
@@ -464,7 +464,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ subptr(rsp, 4 * wordSize);
__ fstp_d(Address(rsp, 0));
__ fstp_d(Address(rsp, 2 * wordSize));
if (StubRoutines::dpow() != NULL) {
if (StubRoutines::dpow() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow));
@@ -474,7 +474,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_exp:
__ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
if (StubRoutines::dexp() != NULL) {
if (StubRoutines::dexp() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp));


@@ -58,7 +58,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// bcp (null)
// ...
// Do FP first so we can use c_rarg3 as temp
@@ -138,7 +138,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// bcp (null)
// ...
// Do FP first so we can use c_rarg3 as temp
@@ -399,7 +399,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
if (kind == Interpreter::java_lang_math_fmaD) {
if (!UseFMA) {
return NULL; // Generate a vanilla entry
return nullptr; // Generate a vanilla entry
}
__ movdbl(xmm0, Address(rsp, wordSize));
__ movdbl(xmm1, Address(rsp, 3 * wordSize));
@@ -407,7 +407,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ fmad(xmm0, xmm1, xmm2, xmm0);
} else if (kind == Interpreter::java_lang_math_fmaF) {
if (!UseFMA) {
return NULL; // Generate a vanilla entry
return nullptr; // Generate a vanilla entry
}
__ movflt(xmm0, Address(rsp, wordSize));
__ movflt(xmm1, Address(rsp, 2 * wordSize));
@@ -417,35 +417,35 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ sqrtsd(xmm0, Address(rsp, wordSize));
} else if (kind == Interpreter::java_lang_math_exp) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dexp() != NULL) {
if (StubRoutines::dexp() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp));
}
} else if (kind == Interpreter::java_lang_math_log) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dlog() != NULL) {
if (StubRoutines::dlog() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog));
}
} else if (kind == Interpreter::java_lang_math_log10) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dlog10() != NULL) {
if (StubRoutines::dlog10() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10));
}
} else if (kind == Interpreter::java_lang_math_sin) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dsin() != NULL) {
if (StubRoutines::dsin() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin));
}
} else if (kind == Interpreter::java_lang_math_cos) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dcos() != NULL) {
if (StubRoutines::dcos() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos));
@@ -453,20 +453,20 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else if (kind == Interpreter::java_lang_math_pow) {
__ movdbl(xmm1, Address(rsp, wordSize));
__ movdbl(xmm0, Address(rsp, 3 * wordSize));
if (StubRoutines::dpow() != NULL) {
if (StubRoutines::dpow() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow));
}
} else if (kind == Interpreter::java_lang_math_tan) {
__ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dtan() != NULL) {
if (StubRoutines::dtan() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
}
} else if (kind == Interpreter::java_lang_math_abs) {
assert(StubRoutines::x86::double_sign_mask() != NULL, "not initialized");
assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize));
__ andpd(xmm0, ExternalAddress(StubRoutines::x86::double_sign_mask()));
} else {


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,8 +143,8 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) {
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL
// Store an oop (or null) at the address described by obj.
// If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm,
@@ -452,7 +452,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ resolve_oop_handle(tmp, rscratch2);
__ cmpoop(tmp, result);
__ jccb(Assembler::notEqual, notNull);
__ xorptr(result, result); // NULL object reference
__ xorptr(result, result); // null object reference
__ bind(notNull);
}
@@ -1155,11 +1155,11 @@ void TemplateTable::aastore() {
do_oop_store(_masm, element_address, rax, IS_ARRAY);
__ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
// Have a null in rax, rdx=array, ecx=index. Store null at ary[idx]
__ bind(is_null);
__ profile_null_seen(rbx);
// Store a NULL
// Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments
@@ -2208,7 +2208,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
in_bytes(InvocationCounter::counter_offset()));
const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ jmp(dispatch);
}
__ bind(no_mdo);
@@ -2216,7 +2216,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ bind(dispatch);
}
@@ -2242,7 +2242,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
InterpreterRuntime::frequency_counter_overflow),
rdx);
// rax: osr nmethod (osr ok) or NULL (osr not possible)
// rax: osr nmethod (osr ok) or null (osr not possible)
// rdx: scratch
// r14: locals pointer
// r13: bcp
@@ -2687,7 +2687,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ load_resolved_method_at_index(byte_no, method, cache, index);
__ load_method_holder(klass, method);
__ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
__ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
}
}
@@ -2774,13 +2774,13 @@ void TemplateTable::jvmti_post_field_access(Register cache,
__ shll(index, LogBytesPerWord);
__ addptr(cache, index);
if (is_static) {
__ xorptr(rax, rax); // NULL object reference
__ xorptr(rax, rax); // null object reference
} else {
__ pop(atos); // Get the object
__ verify_oop(rax);
__ push(atos); // Restore stack state
}
// rax,: object pointer or NULL
// rax,: object pointer or null
// cache: cache entry pointer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
rax, cache);
@@ -3031,7 +3031,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ addptr(robj, RDX);
// object (tos)
__ mov(RCX, rsp);
// c_rarg1: object pointer set up above (NULL if static)
// c_rarg1: object pointer set up above (null if static)
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
__ call_VM(noreg,
@@ -4112,7 +4112,7 @@ void TemplateTable::checkcast() {
__ bind(ok_is_subtype);
__ mov(rax, rdx); // Restore object in rdx
// Collect counts on whether this check-cast sees NULLs a lot or not.
// Collect counts on whether this check-cast sees nulls a lot or not.
if (ProfileInterpreter) {
__ jmp(done);
__ bind(is_null);
@@ -4175,7 +4175,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype);
__ movl(rax, 1);
// Collect counts on whether this test sees NULLs a lot or not.
// Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) {
__ jmp(done);
__ bind(is_null);
@@ -4184,8 +4184,8 @@ void TemplateTable::instanceof() {
__ bind(is_null); // same as 'done'
}
__ bind(done);
// rax = 0: obj == NULL or obj is not an instanceof the specified klass
// rax = 1: obj != NULL and obj is an instanceof the specified klass
// rax = 0: obj == nullptr or obj is not an instanceof the specified klass
// rax = 1: obj != nullptr and obj is an instanceof the specified klass
}
@@ -4247,7 +4247,7 @@ void TemplateTable::athrow() {
void TemplateTable::monitorenter() {
transition(atos, vtos);
// check for NULL object
// check for null object
__ null_check(rax);
const Address monitor_block_top(
@@ -4263,7 +4263,7 @@ void TemplateTable::monitorenter() {
Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
// initialize entry pointer
__ xorl(rmon, rmon); // points to free slot or NULL
__ xorl(rmon, rmon); // points to free slot or null
// find a free slot in the monitor block (result in rmon)
{
@@ -4344,7 +4344,7 @@ void TemplateTable::monitorenter() {
void TemplateTable::monitorexit() {
transition(atos, vtos);
// check for NULL object
// check for null object
__ null_check(rax);
const Address monitor_block_top(


@@ -63,8 +63,8 @@ extern "C" {
typedef void (*get_cpu_info_stub_t)(void*);
typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
static detect_virt_stub_t detect_virt_stub = NULL;
static get_cpu_info_stub_t get_cpu_info_stub = nullptr;
static detect_virt_stub_t detect_virt_stub = nullptr;
#ifdef _LP64
@@ -402,7 +402,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
//
// Some OSs have a bug when upper 128/256bits of YMM/ZMM
// registers are not restored after a signal processing.
// Generate SEGV here (reference through NULL)
// Generate SEGV here (reference through null)
// and check upper YMM/ZMM bits after it.
//
int saved_useavx = UseAVX;
@@ -2111,7 +2111,7 @@ void VM_Version::initialize() {
ResourceMark rm;
// Making this stub must be FIRST use of assembler
stub_blob = BufferBlob::create("VM_Version stub", stub_size);
if (stub_blob == NULL) {
if (stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate stub for VM_Version");
}
CodeBuffer c(stub_blob);
@@ -2185,7 +2185,7 @@ extern "C" {
typedef void (*getCPUIDBrandString_stub_t)(void*);
}
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = NULL;
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;
// VM_Version statics
enum {
@@ -2195,7 +2195,7 @@ enum {
const size_t VENDOR_LENGTH = 13;
const size_t CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1);
static char* _cpu_brand_string = NULL;
static char* _cpu_brand_string = nullptr;
static int64_t _max_qualified_cpu_frequency = 0;
static int _no_of_threads = 0;
@@ -2320,7 +2320,7 @@ const char* const _model_id_pentium_pro[] = {
"",
"Haswell", // 0x45 "4th Generation Intel Core Processor"
"Haswell", // 0x46 "4th Generation Intel Core Processor"
NULL
nullptr
};
/* Brand ID is for back compatibility
@@ -2335,7 +2335,7 @@ const char* const _brand_id[] = {
"",
"",
"Intel Pentium 4 processor",
NULL
nullptr
};
@@ -2483,7 +2483,7 @@ void VM_Version::initialize_tsc(void) {
ResourceMark rm;
cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size);
if (cpuid_brand_string_stub_blob == NULL) {
if (cpuid_brand_string_stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub");
}
CodeBuffer c(cpuid_brand_string_stub_blob);
@@ -2495,12 +2495,12 @@ void VM_Version::initialize_tsc(void) {
const char* VM_Version::cpu_model_description(void) {
uint32_t cpu_family = extended_cpu_family();
uint32_t cpu_model = extended_cpu_model();
const char* model = NULL;
const char* model = nullptr;
if (cpu_family == CPU_FAMILY_PENTIUMPRO) {
for (uint32_t i = 0; i <= cpu_model; i++) {
model = _model_id_pentium_pro[i];
if (model == NULL) {
if (model == nullptr) {
break;
}
}
@@ -2509,27 +2509,27 @@ const char* VM_Version::cpu_model_description(void) {
}
const char* VM_Version::cpu_brand_string(void) {
if (_cpu_brand_string == NULL) {
if (_cpu_brand_string == nullptr) {
_cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal);
if (NULL == _cpu_brand_string) {
return NULL;
if (nullptr == _cpu_brand_string) {
return nullptr;
}
int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH);
if (ret_val != OS_OK) {
FREE_C_HEAP_ARRAY(char, _cpu_brand_string);
_cpu_brand_string = NULL;
_cpu_brand_string = nullptr;
}
}
return _cpu_brand_string;
}
const char* VM_Version::cpu_brand(void) {
const char* brand = NULL;
const char* brand = nullptr;
if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) {
int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF;
brand = _brand_id[0];
for (int i = 0; brand != NULL && i <= brand_num; i += 1) {
for (int i = 0; brand != nullptr && i <= brand_num; i += 1) {
brand = _brand_id[i];
}
}
@@ -2619,11 +2619,11 @@ const char* VM_Version::cpu_family_description(void) {
}
int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!");
assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!");
const char* cpu_type = NULL;
const char* x64 = NULL;
const char* cpu_type = nullptr;
const char* x64 = nullptr;
if (is_intel()) {
cpu_type = "Intel";
@@ -2656,9 +2656,9 @@ int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
}
int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!");
assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!");
assert(getCPUIDBrandString_stub != NULL, "not initialized");
assert(getCPUIDBrandString_stub != nullptr, "not initialized");
// invoke newly generated asm code to fetch CPU Brand String
getCPUIDBrandString_stub(&_cpuid_info);
@@ -2681,7 +2681,7 @@ int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
}
size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
guarantee(buf != NULL, "buffer is NULL!");
guarantee(buf != nullptr, "buffer is null!");
guarantee(buf_len > 0, "buffer len not enough!");
unsigned int flag = 0;
@@ -2742,31 +2742,31 @@ size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
* feature set.
*/
int VM_Version::cpu_detailed_description(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!");
assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!");
static const char* unknown = "<unknown>";
char vendor_id[VENDOR_LENGTH];
const char* family = NULL;
const char* model = NULL;
const char* brand = NULL;
const char* family = nullptr;
const char* model = nullptr;
const char* brand = nullptr;
int outputLen = 0;
family = cpu_family_description();
if (family == NULL) {
if (family == nullptr) {
family = unknown;
}
model = cpu_model_description();
if (model == NULL) {
if (model == nullptr) {
model = unknown;
}
brand = cpu_brand_string();
if (brand == NULL) {
if (brand == nullptr) {
brand = cpu_brand();
if (brand == NULL) {
if (brand == nullptr) {
brand = unknown;
}
}
@@ -2835,7 +2835,7 @@ void VM_Version::initialize_cpu_information() {
*/
int64_t VM_Version::max_qualified_cpu_freq_from_brand_string(void) {
const char* const brand_string = cpu_brand_string();
if (brand_string == NULL) {
if (brand_string == nullptr) {
return 0;
}
const int64_t MEGA = 1000000;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,9 +60,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
@@ -129,7 +129,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ jcc(Assembler::equal, L);
__ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ stop("Vtable entry is null");
__ bind(L);
}
#endif // PRODUCT
@@ -152,9 +152,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,9 +49,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.
@@ -120,7 +120,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ jcc(Assembler::equal, L);
__ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ stop("Vtable entry is null");
__ bind(L);
}
#endif // PRODUCT
@@ -143,9 +143,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// Count unused bytes in instruction sequences of variable size.