8301498: Replace NULL with nullptr in cpu/x86

Reviewed-by: dholmes, kvn
Johan Sjölen 2023-03-22 14:18:40 +00:00
parent ddf1e34c1a
commit 4154a980ca
54 changed files with 656 additions and 656 deletions
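For background (an illustrative sketch, not part of the patch): in C++, NULL is an integer constant (typically 0 or 0L), so it participates in integer overload resolution and conversions, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A mechanical NULL → nullptr substitution like the one below therefore tightens type checking without changing the generated code. A minimal standalone example of the difference:

void take(int)         { }  // what NULL, being an integer constant, can bind to
void take(const char*) { }  // what a null pointer argument should bind to

int main() {
  take(0);        // the literal 0 (NULL's typical definition) picks take(int)
  take(nullptr);  // std::nullptr_t unambiguously picks take(const char*)
  return 0;
}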

View File

@@ -214,7 +214,7 @@ void Assembler::init_attributes(void) {
   _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
   _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
   NOT_LP64(_is_managed = false;)
-  _attributes = NULL;
+  _attributes = nullptr;
 }
@@ -264,7 +264,7 @@ void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
   assert(imm_operand == 0, "default format must be immediate in this file");
-  assert(inst_mark() != NULL, "must be inside InstructionMark");
+  assert(inst_mark() != nullptr, "must be inside InstructionMark");
   if (rspec.type() != relocInfo::none) {
 #ifdef ASSERT
     check_relocation(rspec, format);
@@ -684,7 +684,7 @@ void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
     // disp was created by converting the target address minus the pc
     // at the start of the instruction. That needs more correction here.
     // intptr_t disp = target - next_ip;
-    assert(inst_mark() != NULL, "must be inside InstructionMark");
+    assert(inst_mark() != nullptr, "must be inside InstructionMark");
     address next_ip = pc() + sizeof(int32_t) + post_addr_length;
     int64_t adjusted = disp;
     // Do rip-rel adjustment for 64bit
@@ -1234,7 +1234,7 @@ address Assembler::locate_next_instruction(address inst) {
 #ifdef ASSERT
 void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
   address inst = inst_mark();
-  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
+  assert(inst != nullptr && inst < pc(), "must point to beginning of instruction");
   address opnd;
   Relocation* r = rspec.reloc();
@@ -1690,8 +1690,8 @@ void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
   InstructionMark im(this);
   emit_int8((unsigned char)0xE8);
   intptr_t disp = entry - (pc() + sizeof(int32_t));
-  // Entry is NULL in case of a scratch emit.
-  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
+  // Entry is null in case of a scratch emit.
+  assert(entry == nullptr || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
   // Technically, should use call32_operand, but this format is
   // implied by the fact that we're emitting a call instruction.
@@ -2404,7 +2404,7 @@ void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
   assert((0 <= cc) && (cc < 16), "illegal cc");
   if (L.is_bound()) {
     address dst = target(L);
-    assert(dst != NULL, "jcc most probably wrong");
+    assert(dst != nullptr, "jcc most probably wrong");
     const int short_size = 2;
     const int long_size = 6;
@@ -2462,7 +2462,7 @@ void Assembler::jmp(Address adr) {
 void Assembler::jmp(Label& L, bool maybe_short) {
   if (L.is_bound()) {
     address entry = target(L);
-    assert(entry != NULL, "jmp most probably wrong");
+    assert(entry != nullptr, "jmp most probably wrong");
     InstructionMark im(this);
     const int short_size = 2;
     const int long_size = 5;
@@ -2493,7 +2493,7 @@ void Assembler::jmp(Register entry) {
 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
   InstructionMark im(this);
   emit_int8((unsigned char)0xE9);
-  assert(dest != NULL, "must have a target");
+  assert(dest != nullptr, "must have a target");
   intptr_t disp = dest - (pc() + sizeof(int32_t));
   assert(is_simm32(disp), "must be 32bit offset (jmp)");
   emit_data(disp, rspec, call32_operand);
@@ -2503,7 +2503,7 @@ void Assembler::jmpb_0(Label& L, const char* file, int line) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
-    assert(entry != NULL, "jmp most probably wrong");
+    assert(entry != nullptr, "jmp most probably wrong");
 #ifdef ASSERT
     intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
     intptr_t delta = short_branch_delta();
@@ -6341,7 +6341,7 @@ void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
   relocate(rtype);
   if (abort.is_bound()) {
     address entry = target(abort);
-    assert(entry != NULL, "abort entry NULL");
+    assert(entry != nullptr, "abort entry null");
     intptr_t offset = entry - pc();
     emit_int16((unsigned char)0xC7, (unsigned char)0xF8);
     emit_int32(offset - 6); // 2 opcode + 4 address
@@ -12495,7 +12495,7 @@ void Assembler::emit_data64(jlong data,
                             int format) {
   assert(imm_operand == 0, "default format must be immediate in this file");
   assert(imm_operand == format, "must be immediate");
-  assert(inst_mark() != NULL, "must be inside InstructionMark");
+  assert(inst_mark() != nullptr, "must be inside InstructionMark");
   // Do not use AbstractAssembler::relocate, which is not intended for
   // embedded words. Instead, relocate to the enclosing instruction.
   code_section()->relocate(inst_mark(), rspec, format);
@@ -13521,13 +13521,13 @@ void Assembler::popq(Register dst) {
 // and copying it out on subsequent invocations can thus be beneficial
 static bool precomputed = false;
-static u_char* popa_code = NULL;
+static u_char* popa_code = nullptr;
 static int popa_len = 0;
-static u_char* pusha_code = NULL;
+static u_char* pusha_code = nullptr;
 static int pusha_len = 0;
-static u_char* vzup_code = NULL;
+static u_char* vzup_code = nullptr;
 static int vzup_len = 0;
 void Assembler::precompute_instructions() {
@@ -13574,7 +13574,7 @@ void Assembler::precompute_instructions() {
 }
 static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
-  assert(src != NULL, "code to copy must have been pre-computed");
+  assert(src != nullptr, "code to copy must have been pre-computed");
   assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
   address end = code_section->end();
   memcpy(end, src, src_len);

View File

@@ -355,7 +355,7 @@ class AddressLiteral {
   // creation
   AddressLiteral()
     : _is_lval(false),
-      _target(NULL)
+      _target(nullptr)
   {}
  public:
@@ -919,7 +919,7 @@ private:
   void init_attributes(void);
   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
-  void clear_attributes(void) { _attributes = NULL; }
+  void clear_attributes(void) { _attributes = nullptr; }
   void set_managed(void) { NOT_LP64(_is_managed = true;) }
   void clear_managed(void) { NOT_LP64(_is_managed = false;) }
@@ -2884,13 +2884,13 @@ public:
     _input_size_in_bits(Assembler::EVEX_NObit),
     _evex_encoding(0),
     _embedded_opmask_register_specifier(0), // hard code k0
-    _current_assembler(NULL) { }
+    _current_assembler(nullptr) { }
   ~InstructionAttr() {
-    if (_current_assembler != NULL) {
+    if (_current_assembler != nullptr) {
       _current_assembler->clear_attributes();
     }
-    _current_assembler = NULL;
+    _current_assembler = nullptr;
   }
 private:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ class Bytes: AllStatic {
   // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
   template <typename T>
   static inline T get_native(const void* p) {
-    assert(p != NULL, "null pointer");
+    assert(p != nullptr, "null pointer");
     T x;
@@ -50,7 +50,7 @@ class Bytes: AllStatic {
   template <typename T>
   static inline void put_native(void* p, T x) {
-    assert(p != NULL, "null pointer");
+    assert(p != nullptr, "null pointer");
     if (is_aligned(p, sizeof(T))) {
       *(T*)p = x;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
   __ pop(tmp2);
   __ pop(tmp1);
 #endif /* _LP64 */
-  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
          "polling page return stub not created yet");
   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
@@ -318,7 +318,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 #ifdef ASSERT
     address start = __ pc();
 #endif
-    Metadata* o = NULL;
+    Metadata* o = nullptr;
     __ mov_metadata(_obj, o);
 #ifdef ASSERT
     for (int i = 0; i < _bytes_to_copy; i++) {
@@ -333,7 +333,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 #ifdef ASSERT
     address start = __ pc();
 #endif
-    jobject o = NULL;
+    jobject o = nullptr;
     __ movoop(_obj, o);
 #ifdef ASSERT
     for (int i = 0; i < _bytes_to_copy; i++) {
@@ -404,7 +404,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address entry = __ pc();
   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
-  address target = NULL;
+  address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
     case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;

View File

@@ -144,7 +144,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() {
 address LIR_Assembler::float_constant(float f) {
   address const_addr = __ float_constant(f);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -155,7 +155,7 @@ address LIR_Assembler::float_constant(float f) {
 address LIR_Assembler::double_constant(double d) {
   address const_addr = __ double_constant(d);
-  if (const_addr == NULL) {
+  if (const_addr == nullptr) {
     bailout("const section overflow");
     return __ code()->consts()->start();
   } else {
@@ -321,7 +321,7 @@ void LIR_Assembler::osr_entry() {
         Label L;
         __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
         __ jcc(Assembler::notZero, L);
-        __ stop("locked object is NULL");
+        __ stop("locked object is null");
         __ bind(L);
       }
 #endif
@@ -373,14 +373,14 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
 }
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
-  jobject o = NULL;
+  jobject o = nullptr;
   PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
-  Metadata* o = NULL;
+  Metadata* o = nullptr;
   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
   __ mov_metadata(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
@@ -400,7 +400,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
 int LIR_Assembler::emit_exception_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("exception handler overflow");
     return -1;
@@ -450,7 +450,7 @@ int LIR_Assembler::emit_unwind_handler() {
   }
   // Perform needed unlocking
-  MonitorExitStub* stub = NULL;
+  MonitorExitStub* stub = nullptr;
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::rax_opr);
     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
@@ -483,7 +483,7 @@ int LIR_Assembler::emit_unwind_handler() {
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
   // Emit the slow path assembly
-  if (stub != NULL) {
+  if (stub != nullptr) {
     stub->emit_code(this);
   }
@@ -494,7 +494,7 @@ int LIR_Assembler::emit_unwind_handler() {
 int LIR_Assembler::emit_deopt_handler() {
   // generate code for exception handler
   address handler_base = __ start_a_stub(deopt_handler_size());
-  if (handler_base == NULL) {
+  if (handler_base == nullptr) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
     return -1;
@@ -541,7 +541,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  guarantee(info != NULL, "Shouldn't be NULL");
+  guarantee(info != nullptr, "Shouldn't be null");
   int offset = __ offset();
 #ifdef _LP64
   const Register poll_addr = rscratch1;
@@ -733,7 +733,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
     case T_OBJECT: // fall through
     case T_ARRAY:
-      if (c->as_jobject() == NULL) {
+      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
@@ -799,7 +799,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       ShouldNotReachHere();
   };
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check(null_check_here, info);
   }
 }
@@ -947,7 +947,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
   LIR_Address* to_addr = dest->as_address_ptr();
-  PatchingStub* patch = NULL;
+  PatchingStub* patch = nullptr;
   Register compressed_src = rscratch1;
   if (is_reference_type(type)) {
@@ -1043,7 +1043,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       assert(base != from_hi, "can't be");
       assert(index == noreg || (index != base && index != from_hi), "can't handle this");
       __ movl(as_Address_hi(to_addr), from_hi);
-      if (patch != NULL) {
+      if (patch != nullptr) {
         patching_epilog(patch, lir_patch_high, base, info);
         patch = new PatchingStub(_masm, PatchingStub::access_field_id);
         patch_code = lir_patch_low;
@@ -1052,7 +1052,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     } else {
       assert(index == noreg || (index != base && index != from_lo), "can't handle this");
       __ movl(as_Address_lo(to_addr), from_lo);
-      if (patch != NULL) {
+      if (patch != nullptr) {
         patching_epilog(patch, lir_patch_low, base, info);
         patch = new PatchingStub(_masm, PatchingStub::access_field_id);
         patch_code = lir_patch_high;
@@ -1080,7 +1080,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     default:
       ShouldNotReachHere();
   }
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check(null_check_here, info);
   }
@@ -1198,12 +1198,12 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       break;
   }
-  PatchingStub* patch = NULL;
+  PatchingStub* patch = nullptr;
   if (patch_code != lir_patch_none) {
     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
     assert(from_addr.disp() != 0, "must have");
   }
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }
@@ -1270,7 +1270,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       // addresses with 2 registers are only formed as a result of
       // array access so this code will never have to deal with
      // patches or null checks.
-      assert(info == NULL && patch == NULL, "must be");
+      assert(info == nullptr && patch == nullptr, "must be");
       __ lea(to_hi, as_Address(addr));
       __ movl(to_lo, Address(to_hi, 0));
       __ movl(to_hi, Address(to_hi, BytesPerWord));
@@ -1278,7 +1278,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       assert(base != to_hi, "can't be");
       assert(index == noreg || (index != base && index != to_hi), "can't handle this");
       __ movl(to_hi, as_Address_hi(addr));
-      if (patch != NULL) {
+      if (patch != nullptr) {
        patching_epilog(patch, lir_patch_high, base, info);
        patch = new PatchingStub(_masm, PatchingStub::access_field_id);
        patch_code = lir_patch_low;
@@ -1287,7 +1287,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     } else {
       assert(index == noreg || (index != base && index != to_lo), "can't handle this");
       __ movl(to_lo, as_Address_lo(addr));
-      if (patch != NULL) {
+      if (patch != nullptr) {
         patching_epilog(patch, lir_patch_low, base, info);
         patch = new PatchingStub(_masm, PatchingStub::access_field_id);
         patch_code = lir_patch_high;
@@ -1339,7 +1339,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       ShouldNotReachHere();
   }
-  if (patch != NULL) {
+  if (patch != nullptr) {
     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
   }
@@ -1401,18 +1401,18 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 #ifdef ASSERT
-  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
-  if (op->block() != NULL) _branch_target_blocks.append(op->block());
-  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
+  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
+  if (op->block() != nullptr) _branch_target_blocks.append(op->block());
+  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
 #endif
   if (op->cond() == lir_cond_always) {
-    if (op->info() != NULL) add_debug_info_for_branch(op->info());
+    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
     __ jmp (*(op->label()));
   } else {
     Assembler::Condition acond = Assembler::zero;
     if (op->code() == lir_cond_float_branch) {
-      assert(op->ublock() != NULL, "must have unordered successor");
+      assert(op->ublock() != nullptr, "must have unordered successor");
       __ jcc(Assembler::parity, *(op->ublock()->label()));
       switch(op->cond()) {
         case lir_cond_equal: acond = Assembler::equal; break;
@@ -1569,7 +1569,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
       __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
     }
     // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
-    assert(op->stub() != NULL, "stub required");
+    assert(op->stub() != nullptr, "stub required");
     __ cmpl(dest->as_register(), 0x80000000);
     __ jcc(Assembler::equal, *op->stub()->entry());
     __ bind(*op->stub()->continuation());
@@ -1682,17 +1682,17 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   // check if it needs to be profiled
-  ciMethodData* md = NULL;
-  ciProfileData* data = NULL;
+  ciMethodData* md = nullptr;
+  ciProfileData* data = nullptr;
   if (op->should_profile()) {
     ciMethod* method = op->profiled_method();
-    assert(method != NULL, "Should have method");
+    assert(method != nullptr, "Should have method");
     int bci = op->profiled_bci();
     md = method->method_data_or_null();
-    assert(md != NULL, "Sanity");
+    assert(md != nullptr, "Sanity");
     data = md->bci_to_data(bci);
-    assert(data != NULL, "need data for type check");
+    assert(data != nullptr, "need data for type check");
     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
   }
   Label profile_cast_success, profile_cast_failure;
@@ -1798,7 +1798,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
       }
     } else {
       // perform the fast part of the checking logic
-      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
       __ push(klass_RInfo);
       __ push(k_RInfo);
@@ -1842,17 +1842,17 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     CodeStub* stub = op->stub();
     // check if it needs to be profiled
-    ciMethodData* md = NULL;
-    ciProfileData* data = NULL;
+    ciMethodData* md = nullptr;
+    ciProfileData* data = nullptr;
     if (op->should_profile()) {
       ciMethod* method = op->profiled_method();
-      assert(method != NULL, "Should have method");
+      assert(method != nullptr, "Should have method");
       int bci = op->profiled_bci();
       md = method->method_data_or_null();
-      assert(md != NULL, "Sanity");
+      assert(md != nullptr, "Sanity");
       data = md->bci_to_data(bci);
-      assert(data != NULL, "need data for type check");
+      assert(data != nullptr, "need data for type check");
       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
     }
     Label profile_cast_success, profile_cast_failure, done;
@@ -1882,7 +1882,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       // get instance klass (it's already uncompressed)
       __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
       // perform the fast part of the checking logic
-      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
       __ push(klass_RInfo);
       __ push(k_RInfo);
@@ -2021,7 +2021,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
   } else if (opr1->is_stack()) {
     stack2reg(opr1, result, result->type());
   } else if (opr1->is_constant()) {
-    const2reg(opr1, result, lir_patch_none, NULL);
+    const2reg(opr1, result, lir_patch_none, nullptr);
   } else {
     ShouldNotReachHere();
   }
@@ -2053,7 +2053,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
   } else if (opr2->is_stack()) {
     stack2reg(opr2, result, result->type());
   } else if (opr2->is_constant()) {
-    const2reg(opr2, result, lir_patch_none, NULL);
+    const2reg(opr2, result, lir_patch_none, nullptr);
   } else {
     ShouldNotReachHere();
   }
@@ -2063,7 +2063,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
-  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
+  assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
   if (left->is_single_cpu()) {
     assert(left == dest, "left and dest must be equal");
@@ -2259,7 +2259,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       raddr = frame_map()->address_for_slot(right->single_stack_ix());
     } else if (right->is_constant()) {
       address const_addr = float_constant(right->as_jfloat());
-      assert(const_addr != NULL, "incorrect float/double constant maintenance");
+      assert(const_addr != nullptr, "incorrect float/double constant maintenance");
       // hack for now
       raddr = __ as_Address(InternalAddress(const_addr));
     } else {
@@ -2666,10 +2666,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
     if (c->type() == T_INT) {
       __ cmpl(reg1, c->as_jint());
     } else if (c->type() == T_METADATA) {
-      // All we need for now is a comparison with NULL for equality.
+      // All we need for now is a comparison with null for equality.
       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
       Metadata* m = c->as_metadata();
-      if (m == NULL) {
+      if (m == nullptr) {
         __ cmpptr(reg1, NULL_WORD);
       } else {
         ShouldNotReachHere();
@@ -2677,7 +2677,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
     } else if (is_reference_type(c->type())) {
       // In 64bit oops are single register
       jobject o = c->as_jobject();
-      if (o == NULL) {
+      if (o == nullptr) {
         __ cmpptr(reg1, NULL_WORD);
       } else {
         __ cmpoop(reg1, o, rscratch1);
@@ -2687,7 +2687,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
       }
     // cpu register - address
     } else if (opr2->is_address()) {
-      if (op->info() != NULL) {
+      if (op->info() != nullptr) {
         add_debug_info_for_null_check_here(op->info());
       }
       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
@@ -2737,7 +2737,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
       __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
     } else if (opr2->is_address()) {
       // xmm register - address
-      if (op->info() != NULL) {
+      if (op->info() != nullptr) {
         add_debug_info_for_null_check_here(op->info());
       }
       __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
@@ -2758,7 +2758,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
     } else if (opr2->is_address()) {
       // xmm register - address
-      if (op->info() != NULL) {
+      if (op->info() != nullptr) {
         add_debug_info_for_null_check_here(op->info());
       }
       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
@@ -2781,7 +2781,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
       __ movoop(rscratch1, c->as_jobject());
     }
 #endif // LP64
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
       add_debug_info_for_null_check_here(op->info());
     }
     // special case: address - constant
@@ -2887,7 +2887,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size());
-  if (stub == NULL) {
+  if (stub == nullptr) {
     bailout("static call stub overflow");
     return;
   }
@@ -2897,7 +2897,7 @@ void LIR_Assembler::emit_static_call_stub() {
   // make sure that the displacement word of the call ends up word aligned
   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   __ relocate(static_stub_Relocation::spec(call_pc));
-  __ mov_metadata(rbx, (Metadata*)NULL);
+  __ mov_metadata(rbx, (Metadata*)nullptr);
   // must be set to -1 at code generation time
   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
@@ -3073,11 +3073,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
   CodeStub* stub = op->stub();
   int flags = op->flags();
-  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
+  BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
   // if we don't know anything, just go through the generic arraycopy
-  if (default_type == NULL) {
+  if (default_type == nullptr) {
     // save outgoing arguments on stack in case call to System.arraycopy is needed
     // HACK ALERT. This code used to push the parameters in a hardwired fashion
     // for interpreter calling conventions. Now we have to do it in new style conventions.
@@ -3096,7 +3096,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
     address copyfunc_addr = StubRoutines::generic_arraycopy();
-    assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+    assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
     // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
 #ifdef _LP64
@@ -3169,7 +3169,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     return;
   }
-  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
+  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
   int elem_size = type2aelembytes(basic_type);
   Address::ScaleFactor scale;
@@ -3199,7 +3199,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
   // length and pos's are all sign extended at this point on 64bit
-  // test for NULL
+  // test for null
   if (flags & LIR_OpArrayCopy::src_null_check) {
     __ testptr(src, src);
     __ jcc(Assembler::zero, *stub->entry());
@@ -3280,7 +3280,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ load_klass(src, src, tmp_load_klass);
       __ load_klass(dst, dst, tmp_load_klass);
-      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
+      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
       __ push(src);
       __ push(dst);
@@ -3296,7 +3296,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ pop(src);
       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
-      if (copyfunc_addr != NULL) { // use stub if available
+      if (copyfunc_addr != nullptr) { // use stub if available
         // src is not a sub class of dst so we have to do a
         // per-element check.
@@ -3501,7 +3501,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
   if (UseHeavyMonitors) {
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
       add_debug_info_for_null_check_here(op->info());
       __ null_check(obj);
     }
@@ -3510,7 +3510,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
-    if (op->info() != NULL) {
+    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
@@ -3528,7 +3528,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
   Register result = op->result_opr()->as_pointer_register();
   CodeEmitInfo* info = op->info();
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }
@@ -3549,9 +3549,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   // Update counter for all call types
   ciMethodData* md = method->method_data_or_null();
-  assert(md != NULL, "Sanity");
+  assert(md != nullptr, "Sanity");
   ciProfileData* data = md->bci_to_data(bci);
-  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
+  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
   Register mdo = op->mdo()->as_register();
   __ mov_metadata(mdo, md->constant_encoding());
@@ -3564,7 +3564,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
     assert_different_registers(mdo, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
       // We know the type that will be seen at this call site; we can
       // statically update the MethodData* rather than needing to do
       // dynamic tests on the receiver type
@@ -3589,7 +3589,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
       // VirtualCallData rather than just the first time
       for (i = 0; i < VirtualCallData::row_limit(); i++) {
         ciKlass* receiver = vc_data->receiver(i);
-        if (receiver == NULL) {
+        if (receiver == nullptr) {
           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
           __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@@ -3626,7 +3626,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
   Label update, next, none;
   bool do_null = !not_null;
-  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
   assert(do_null || do_update, "why are we here?");
@@ -3661,7 +3661,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
   if (do_update) {
 #ifdef ASSERT
-    if (exact_klass != NULL) {
+    if (exact_klass != nullptr) {
       Label ok;
       __ load_klass(tmp, tmp, tmp_load_klass);
       __ push(tmp);
@@ -3674,8 +3674,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
     }
 #endif
     if (!no_conflict) {
-      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
-        if (exact_klass != NULL) {
+      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
+        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp, tmp_load_klass);
@@ -3703,7 +3703,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
           __ jccb(Assembler::zero, next);
         }
       } else {
-        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
         __ movptr(tmp, mdo_addr);
@@ -3723,7 +3723,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
       }
     } else {
       // There's a single possible klass at this profile point
-      assert(exact_klass != NULL, "should be");
+      assert(exact_klass != nullptr, "should be");
       if (TypeEntries::is_type_none(current_klass)) {
         __ mov_metadata(tmp, exact_klass->constant_encoding());
         __ xorptr(tmp, mdo_addr);
@@ -3754,7 +3754,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
         // first time here. Set profile type.
         __ movptr(mdo_addr, tmp);
       } else {
-        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
         __ movptr(tmp, mdo_addr);
@@ -3861,7 +3861,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
   assert(src->is_address(), "must be an address");
   assert(dest->is_register(), "must be a register");
-  PatchingStub* patch = NULL;
+  PatchingStub* patch = nullptr;
   if (patch_code != lir_patch_none) {
     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
   }
@@ -3870,7 +3870,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
   LIR_Address* addr = src->as_address_ptr();
   __ lea(reg, as_Address(addr));
-  if (patch != NULL) {
+  if (patch != nullptr) {
     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
   }
 }
@@ -3880,7 +3880,7 @@ void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, Co
 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
   assert(!tmp->is_valid(), "don't need temporary");
   __ call(RuntimeAddress(dest));
-  if (info != NULL) {
+  if (info != nullptr) {
     add_call_info_here(info);
   }
   __ post_call_nop();
@@ -3890,7 +3890,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
   assert(type == T_LONG, "only for volatile long fields");
-  if (info != NULL) {
+  if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }

View File

@@ -125,7 +125,7 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
     return false;
   }
   Constant* c = v->as_Constant();
-  if (c && c->state_before() == NULL) {
+  if (c && c->state_before() == nullptr) {
     // constants of any type can be stored directly, except for
     // unloaded object constants.
     return true;
@@ -143,7 +143,7 @@ bool LIRGenerator::can_inline_as_constant(Value v) const {
 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
   if (c->type() == T_LONG) return false;
-  return c->type() != T_OBJECT || c->as_jobject() == NULL;
+  return c->type() != T_OBJECT || c->as_jobject() == nullptr;
 }
@@ -312,7 +312,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
   // "lock" stores the address of the monitor stack slot, so this is not an oop
   LIR_Opr lock = new_register(T_INT);
-  CodeEmitInfo* info_for_exception = NULL;
+  CodeEmitInfo* info_for_exception = nullptr;
   if (x->needs_null_check()) {
     info_for_exception = state_for(x);
   }
@@ -383,7 +383,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
   bool must_load_right = false;
   if (right.is_constant()) {
     LIR_Const* c = right.result()->as_constant_ptr();
-    assert(c != NULL, "invalid constant");
+    assert(c != nullptr, "invalid constant");
     assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
     if (c->type() == T_FLOAT) {
@@ -429,7 +429,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
   left.load_item_force(cc->at(0));
   right.load_item_force(cc->at(1));
-  address entry = NULL;
+  address entry = nullptr;
   switch (x->op()) {
     case Bytecodes::_frem:
       entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
@@ -499,7 +499,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
     __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
     __ branch(lir_cond_equal, new DivByZeroStub(info));
-    address entry = NULL;
+    address entry = nullptr;
     switch (x->op()) {
       case Bytecodes::_lrem:
         entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
@ -527,7 +527,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
right.load_item(); right.load_item();
LIR_Opr reg = FrameMap::long0_opr; LIR_Opr reg = FrameMap::long0_opr;
arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL); arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
LIR_Opr result = rlock_result(x); LIR_Opr result = rlock_result(x);
__ move(reg, result); __ move(reg, result);
} else { } else {
@ -539,7 +539,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
// don't load constants to save register // don't load constants to save register
right.load_nonconstant(); right.load_nonconstant();
rlock_result(x); rlock_result(x);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
} }
} }
@ -583,7 +583,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0)); __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info)); __ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion). // Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL; info = nullptr;
} }
LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
if (x->op() == Bytecodes::_irem) { if (x->op() == Bytecodes::_irem) {
@ -650,7 +650,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
// when an operand with use count 1 is the left operand, then it is // when an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary // likely that no move for 2-operand-LIR-form is necessary
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands(); x->swap_operands();
} }
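
This swap heuristic exists because the two-operand LIR form overwrites its left input (dst = dst op src): putting the operand that is used less elsewhere on the left -- ideally use count 1, so it dies here -- avoids a preserving move. A toy model with hypothetical types:

    #include <utility>

    struct Node { int use_count; bool is_constant; };

    // Mirrors the condition above: only commutative ops, and never swapping
    // a constant into the left slot.
    inline void normalize_operands(Node*& x, Node*& y, bool commutative) {
      if (commutative && !y->is_constant && x->use_count > y->use_count) {
        std::swap(x, y);
      }
    }
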
@ -691,7 +691,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {
void LIRGenerator::do_LogicOp(LogicOp* x) { void LIRGenerator::do_LogicOp(LogicOp* x) {
// when an operand with use count 1 is the left operand, then it is // when an operand with use count 1 is the left operand, then it is
// likely that no move for 2-operand-LIR-form is necessary // likely that no move for 2-operand-LIR-form is necessary
if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands(); x->swap_operands();
} }
@ -867,7 +867,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
LIR_Opr calc_result = rlock_result(x); LIR_Opr calc_result = rlock_result(x);
LIR_Opr result_reg = result_register_for(x->type()); LIR_Opr result_reg = result_register_for(x->type());
CallingConvention* cc = NULL; CallingConvention* cc = nullptr;
if (x->id() == vmIntrinsics::_dpow) { if (x->id() == vmIntrinsics::_dpow) {
LIRItem value1(x->argument_at(1), this); LIRItem value1(x->argument_at(1), this);
@ -892,49 +892,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
result_reg = tmp; result_reg = tmp;
switch(x->id()) { switch(x->id()) {
case vmIntrinsics::_dexp: case vmIntrinsics::_dexp:
if (StubRoutines::dexp() != NULL) { if (StubRoutines::dexp() != nullptr) {
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dlog: case vmIntrinsics::_dlog:
if (StubRoutines::dlog() != NULL) { if (StubRoutines::dlog() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dlog10: case vmIntrinsics::_dlog10:
if (StubRoutines::dlog10() != NULL) { if (StubRoutines::dlog10() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dpow: case vmIntrinsics::_dpow:
if (StubRoutines::dpow() != NULL) { if (StubRoutines::dpow() != nullptr) {
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dsin: case vmIntrinsics::_dsin:
if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) { if (VM_Version::supports_sse2() && StubRoutines::dsin() != nullptr) {
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dcos: case vmIntrinsics::_dcos:
if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) { if (VM_Version::supports_sse2() && StubRoutines::dcos() != nullptr) {
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dtan: case vmIntrinsics::_dtan:
if (StubRoutines::dtan() != NULL) { if (StubRoutines::dtan() != nullptr) {
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
@ -945,49 +945,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
#else #else
switch (x->id()) { switch (x->id()) {
case vmIntrinsics::_dexp: case vmIntrinsics::_dexp:
if (StubRoutines::dexp() != NULL) { if (StubRoutines::dexp() != nullptr) {
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dlog: case vmIntrinsics::_dlog:
if (StubRoutines::dlog() != NULL) { if (StubRoutines::dlog() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dlog10: case vmIntrinsics::_dlog10:
if (StubRoutines::dlog10() != NULL) { if (StubRoutines::dlog10() != nullptr) {
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dpow: case vmIntrinsics::_dpow:
if (StubRoutines::dpow() != NULL) { if (StubRoutines::dpow() != nullptr) {
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dsin: case vmIntrinsics::_dsin:
if (StubRoutines::dsin() != NULL) { if (StubRoutines::dsin() != nullptr) {
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dcos: case vmIntrinsics::_dcos:
if (StubRoutines::dcos() != NULL) { if (StubRoutines::dcos() != nullptr) {
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
} }
break; break;
case vmIntrinsics::_dtan: case vmIntrinsics::_dtan:
if (StubRoutines::dtan() != NULL) { if (StubRoutines::dtan() != nullptr) {
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
} else { } else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
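
Every case in both switches repeats one dispatch pattern: prefer the optimized stub if the VM generated one at startup, otherwise fall back to the portable SharedRuntime implementation; one of the two switches additionally gates dsin/dcos on supports_sse2(), since SSE2 cannot be assumed on 32-bit x86. A standalone model of the pattern (names hypothetical, not HotSpot code):

    #include <cmath>

    typedef double (*unary_math_fn)(double);

    // StubRoutines::dexp() stand-in: nullptr when no stub was generated.
    static unary_math_fn dexp_stub = nullptr;

    // SharedRuntime::dexp stand-in: the always-available portable path.
    static double shared_dexp(double d) { return std::exp(d); }

    inline unary_math_fn resolve_dexp() {
      return (dexp_stub != nullptr) ? dexp_stub : shared_dexp;
    }
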
@ -1259,7 +1259,7 @@ void LIRGenerator::do_Convert(Convert* x) {
// arguments of lir_convert // arguments of lir_convert
LIR_Opr conv_input = input; LIR_Opr conv_input = input;
LIR_Opr conv_result = result; LIR_Opr conv_result = result;
ConversionStub* stub = NULL; ConversionStub* stub = nullptr;
if (fixed_input) { if (fixed_input) {
conv_input = fixed_register_for(input->type()); conv_input = fixed_register_for(input->type());
@ -1335,7 +1335,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIRItem length(x->length(), this); LIRItem length(x->length(), this);
// in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
// and therefore provide the state before the parameters have been consumed // and therefore provide the state before the parameters have been consumed
CodeEmitInfo* patching_info = NULL; CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) { if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before()); patching_info = state_for(x, x->state_before());
} }
@ -1368,14 +1368,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims(); Values* dims = x->dims();
int i = dims->length(); int i = dims->length();
LIRItemList* items = new LIRItemList(i, i, NULL); LIRItemList* items = new LIRItemList(i, i, nullptr);
while (i-- > 0) { while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this); LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size); items->at_put(i, size);
} }
// Evaluate state_for early since it may emit code. // Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL; CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || PatchALot) { if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before()); patching_info = state_for(x, x->state_before());
@ -1424,7 +1424,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) { void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this); LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL; CodeEmitInfo* patching_info = nullptr;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// must do this before locking the destination register as an oop register, // must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization) // and before the obj is loaded (the latter is for deoptimization)
@ -1439,10 +1439,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
CodeStub* stub; CodeStub* stub;
if (x->is_incompatible_class_change_check()) { if (x->is_incompatible_class_change_check()) {
assert(patching_info == NULL, "can't patch this"); assert(patching_info == nullptr, "can't patch this");
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
} else if (x->is_invokespecial_receiver_check()) { } else if (x->is_invokespecial_receiver_check()) {
assert(patching_info == NULL, "can't patch this"); assert(patching_info == nullptr, "can't patch this");
stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
} else { } else {
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
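
A compact model of the slow-path selection above (the enum is hypothetical): the two special-cased checks are synthetic, their class is always loaded, and that is exactly what the patching_info == nullptr asserts encode.

    enum class CastKind { incompatible_class_change, invokespecial_receiver, ordinary };

    // Which slow path a failing check dispatches to, per the hunk above.
    inline const char* slow_path_for(CastKind kind) {
      switch (kind) {
        case CastKind::incompatible_class_change:
          return "SimpleExceptionStub -> IncompatibleClassChangeError";
        case CastKind::invokespecial_receiver:
          return "DeoptimizeStub -> Deoptimization::Reason_class_check";
        default:
          return "SimpleExceptionStub -> ClassCastException";
      }
    }
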
@ -1464,7 +1464,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
// result and test object may not be in same register // result and test object may not be in same register
LIR_Opr reg = rlock_result(x); LIR_Opr reg = rlock_result(x);
CodeEmitInfo* patching_info = NULL; CodeEmitInfo* patching_info = nullptr;
if ((!x->klass()->is_loaded() || PatchALot)) { if ((!x->klass()->is_loaded() || PatchALot)) {
// must do this before locking the destination register as an oop register // must do this before locking the destination register as an oop register
patching_info = state_for(x, x->state_before()); patching_info = state_for(x, x->state_before());


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ void LinearScan::allocate_fpu_stack() {
// (To minimize the amount of work we have to do if we have to merge FPU stacks) // (To minimize the amount of work we have to do if we have to merge FPU stacks)
if (ComputeExactFPURegisterUsage) { if (ComputeExactFPURegisterUsage) {
Interval* intervals_in_register, *intervals_in_memory; Interval* intervals_in_register, *intervals_in_memory;
create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL); create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, nullptr);
// ignore memory intervals by overwriting intervals_in_memory // ignore memory intervals by overwriting intervals_in_memory
// the dummy interval is needed to enforce the walker to walk until the given id: // the dummy interval is needed to enforce the walker to walk until the given id:
@ -109,14 +109,14 @@ void LinearScan::allocate_fpu_stack() {
FpuStackAllocator alloc(ir()->compilation(), this); FpuStackAllocator alloc(ir()->compilation(), this);
_fpu_stack_allocator = &alloc; _fpu_stack_allocator = &alloc;
alloc.allocate(); alloc.allocate();
_fpu_stack_allocator = NULL; _fpu_stack_allocator = nullptr;
} }
FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator) FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
: _compilation(compilation) : _compilation(compilation)
, _allocator(allocator) , _allocator(allocator)
, _lir(NULL) , _lir(nullptr)
, _pos(-1) , _pos(-1)
, _sim(compilation) , _sim(compilation)
, _temp_sim(compilation) , _temp_sim(compilation)
@ -136,14 +136,14 @@ void FpuStackAllocator::allocate() {
} }
#endif #endif
assert(fpu_stack_state != NULL || assert(fpu_stack_state != nullptr ||
block->end()->as_Base() != NULL || block->end()->as_Base() != nullptr ||
block->is_set(BlockBegin::exception_entry_flag), block->is_set(BlockBegin::exception_entry_flag),
"FPU stack state must be present due to linear-scan order for FPU stack allocation"); "FPU stack state must be present due to linear-scan order for FPU stack allocation");
// note: exception handler entries always start with an empty fpu stack // note: exception handler entries always start with an empty fpu stack
// because stack merging would be too complicated // because stack merging would be too complicated
if (fpu_stack_state != NULL) { if (fpu_stack_state != nullptr) {
sim()->read_state(fpu_stack_state); sim()->read_state(fpu_stack_state);
} else { } else {
sim()->clear(); sim()->clear();
@ -186,7 +186,7 @@ void FpuStackAllocator::allocate_block(BlockBegin* block) {
LIR_Op2* op2 = op->as_Op2(); LIR_Op2* op2 = op->as_Op2();
LIR_OpCall* opCall = op->as_OpCall(); LIR_OpCall* opCall = op->as_OpCall();
if (branch != NULL && branch->block() != NULL) { if (branch != nullptr && branch->block() != nullptr) {
if (!processed_merge) { if (!processed_merge) {
// propagate stack at first branch to a successor // propagate stack at first branch to a successor
processed_merge = true; processed_merge = true;
@ -195,11 +195,11 @@ void FpuStackAllocator::allocate_block(BlockBegin* block) {
assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches"); assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
} }
} else if (op1 != NULL) { } else if (op1 != nullptr) {
handle_op1(op1); handle_op1(op1);
} else if (op2 != NULL) { } else if (op2 != nullptr) {
handle_op2(op2); handle_op2(op2);
} else if (opCall != NULL) { } else if (opCall != nullptr) {
handle_opCall(opCall); handle_opCall(opCall);
} }
@ -256,7 +256,7 @@ void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
} }
#endif #endif
if (xhandler->entry_code() == NULL) { if (xhandler->entry_code() == nullptr) {
// need entry code to clear FPU stack // need entry code to clear FPU stack
LIR_List* entry_code = new LIR_List(_compilation); LIR_List* entry_code = new LIR_List(_compilation);
entry_code->jump(xhandler->entry_block()); entry_code->jump(xhandler->entry_block());
@ -280,7 +280,7 @@ void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
switch (op->code()) { switch (op->code()) {
case lir_move: case lir_move:
assert(op->as_Op1() != NULL, "must be LIR_Op1"); assert(op->as_Op1() != nullptr, "must be LIR_Op1");
assert(pos() != insts->length() - 1, "must not be last operation"); assert(pos() != insts->length() - 1, "must not be last operation");
handle_op1((LIR_Op1*)op); handle_op1((LIR_Op1*)op);
@ -1042,7 +1042,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
intArray* state = sux->fpu_stack_state(); intArray* state = sux->fpu_stack_state();
LIR_List* instrs = new LIR_List(_compilation); LIR_List* instrs = new LIR_List(_compilation);
if (state != NULL) { if (state != nullptr) {
// Merge with a successor that already has a FPU stack state // Merge with a successor that already has a FPU stack state
// the block must only have one successor because critical edges must have been split // the block must only have one successor because critical edges must have been split
FpuStackSim* cur_sim = sim(); FpuStackSim* cur_sim = sim();
@ -1088,7 +1088,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
} }
// check if new state is same // check if new state is same
if (sux->fpu_stack_state() != NULL) { if (sux->fpu_stack_state() != nullptr) {
intArray* sux_state = sux->fpu_stack_state(); intArray* sux_state = sux->fpu_stack_state();
assert(state->length() == sux_state->length(), "overwriting existing stack state"); assert(state->length() == sux_state->length(), "overwriting existing stack state");
for (int j = 0; j < state->length(); j++) { for (int j = 0; j < state->length(); j++) {
@ -1114,7 +1114,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
BlockBegin* sux = block->sux_at(i); BlockBegin* sux = block->sux_at(i);
intArray* sux_state = sux->fpu_stack_state(); intArray* sux_state = sux->fpu_stack_state();
assert(sux_state != NULL, "no fpu state"); assert(sux_state != nullptr, "no fpu state");
assert(cur_state->length() == sux_state->length(), "incorrect length"); assert(cur_state->length() == sux_state->length(), "incorrect length");
for (int i = 0; i < cur_state->length(); i++) { for (int i = 0; i < cur_state->length(); i++) {
assert(cur_state->at(i) == sux_state->at(i), "element not equal"); assert(cur_state->at(i) == sux_state->at(i), "element not equal");


@ -89,7 +89,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
subptr(hdr, rsp); subptr(hdr, rsp);
andptr(hdr, aligned_mask - (int)os::vm_page_size()); andptr(hdr, aligned_mask - (int)os::vm_page_size());
// for recursive locking, the result is zero => save it in the displaced header // for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking) // location (null in the displaced hdr location indicates recursive locking)
movptr(Address(disp_hdr, 0), hdr); movptr(Address(disp_hdr, 0), hdr);
// otherwise we don't care about the result and handle locking via runtime call // otherwise we don't care about the result and handle locking via runtime call
jcc(Assembler::notZero, slow_case); jcc(Assembler::notZero, slow_case);
@ -110,7 +110,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// load displaced header // load displaced header
movptr(hdr, Address(disp_hdr, 0)); movptr(hdr, Address(disp_hdr, 0));
// if the loaded hdr is NULL we had recursive locking // if the loaded hdr is null we had recursive locking
testptr(hdr, hdr); testptr(hdr, hdr);
// if we had recursive locking, we are done // if we had recursive locking, we are done
jcc(Assembler::zero, done); jcc(Assembler::zero, done);
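
Both hunks rely on one convention: a recursive stack lock is recorded by leaving null in the displaced-header slot, so unlock only needs to test that slot. A simplified standalone model, deliberately ignoring the exact x86 page-masking trick used to detect recursion:

    #include <cstdint>

    struct BasicLockModel { uintptr_t displaced_header; };

    // Enter: a recursive acquisition stores 0 (playing the null role)
    // instead of the displaced mark word.
    inline void record_enter(BasicLockModel* lock, bool recursive, uintptr_t mark) {
      lock->displaced_header = recursive ? 0 : mark;
    }

    // Exit: a null displaced header means recursive unlock, nothing to
    // restore -- mirrors testptr(hdr, hdr); jcc(Assembler::zero, done).
    inline bool unlock_is_recursive(const BasicLockModel* lock) {
      return lock->displaced_header == 0;
    }
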
@ -279,7 +279,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
verify_oop(receiver); verify_oop(receiver);
// explicit NULL check not needed since load from [klass_offset] causes a trap // explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache // check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset(); int start_offset = offset();
@ -322,7 +322,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
// C1 code is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub // C1 code is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */); bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */);
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -119,7 +119,7 @@
void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN; void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN;
// This platform only uses signal-based null checks. The Label is not needed. // This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); } void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); }
void load_parameter(int offset_in_words, Register reg); void load_parameter(int offset_in_words, Register reg);
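
The null_check signature above documents a platform choice: x86 relies on the SIGSEGV raised by the faulting load itself, so no explicit compare-and-branch (and hence no Label) is needed. A POSIX-only toy demonstration of the idea -- the JVM's real recovery machinery is far more involved:

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>

    static sigjmp_buf recover_point;
    static void on_segv(int) { siglongjmp(recover_point, 1); }

    int main() {
      struct sigaction sa {};
      sa.sa_handler = on_segv;
      sigaction(SIGSEGV, &sa, nullptr);

      volatile int* p = nullptr;
      if (sigsetjmp(recover_point, 1) == 0) {
        int v = *p;              // faulting load, no explicit null test
        std::printf("%d\n", v);  // never reached
      } else {
        std::printf("fault recovered, as the JVM turns it into a NullPointerException\n");
      }
      return 0;
    }
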


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
int call_offset = -1; int call_offset = -1;
if (!align_stack) { if (!align_stack) {
set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1); set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
} else { } else {
address the_pc = pc(); address the_pc = pc();
call_offset = offset(); call_offset = offset();
@ -673,7 +673,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// Save registers, if required. // Save registers, if required.
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
OopMap* oop_map = NULL; OopMap* oop_map = nullptr;
switch (id) { switch (id) {
case forward_exception_id: case forward_exception_id:
// We're handling an exception in the context of a compiled frame. // We're handling an exception in the context of a compiled frame.
@ -870,7 +870,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
const int num_rt_args = 2; // thread + dummy const int num_rt_args = 2; // thread + dummy
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
OopMap* oop_map = save_live_registers(sasm, num_rt_args); OopMap* oop_map = save_live_registers(sasm, num_rt_args);
@ -886,7 +886,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
__ get_thread(thread); __ get_thread(thread);
__ push(thread); __ push(thread);
#endif // _LP64 #endif // _LP64
__ set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1); __ set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
// do the call // do the call
__ call(RuntimeAddress(target)); __ call(RuntimeAddress(target));
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
@ -1000,7 +1000,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
bool save_fpu_registers = true; bool save_fpu_registers = true;
// stub code & info for the different stubs // stub code & info for the different stubs
OopMapSet* oop_maps = NULL; OopMapSet* oop_maps = nullptr;
switch (id) { switch (id) {
case forward_exception_id: case forward_exception_id:
{ {
@ -1267,7 +1267,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
Label miss; Label miss;
__ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, nullptr, &miss);
// fallthrough on success: // fallthrough on success:
__ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
@ -1342,7 +1342,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
restore_live_registers(sasm); restore_live_registers(sasm);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ leave(); __ leave();
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
} }
@ -1492,7 +1492,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
restore_live_registers(sasm); restore_live_registers(sasm);
__ leave(); __ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created"); assert(deopt_blob != nullptr, "deoptimization blob must have been created");
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ int C2SafepointPollStub::max_size() const {
} }
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) { void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
assert(SharedRuntime::polling_page_return_handler_blob() != NULL, assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet"); "polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();


@ -130,7 +130,7 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool
if (!is_stub) { if (!is_stub) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
#ifdef _LP64 #ifdef _LP64
if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { if (BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
// We put the non-hot code of the nmethod entry barrier out-of-line in a stub. // We put the non-hot code of the nmethod entry barrier out-of-line in a stub.
Label dummy_slow_path; Label dummy_slow_path;
Label dummy_continuation; Label dummy_continuation;
@ -147,7 +147,7 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool
} }
#else #else
// Don't bother with out-of-line nmethod entry barrier stub for x86_32. // Don't bother with out-of-line nmethod entry barrier stub for x86_32.
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */); bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */);
#endif #endif
} }
} }
@ -228,7 +228,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
imulptr(scrReg, scrReg, RTMAbortRatio); imulptr(scrReg, scrReg, RTMAbortRatio);
cmpptr(tmpReg, scrReg); cmpptr(tmpReg, scrReg);
jccb(Assembler::below, L_check_always_rtm1); jccb(Assembler::below, L_check_always_rtm1);
if (method_data != NULL) { if (method_data != nullptr) {
// set rtm_state to "no rtm" in MDO // set rtm_state to "no rtm" in MDO
mov_metadata(tmpReg, method_data); mov_metadata(tmpReg, method_data);
lock(); lock();
@ -242,7 +242,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset())); movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate); cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
jccb(Assembler::below, L_done); jccb(Assembler::below, L_done);
if (method_data != NULL) { if (method_data != nullptr) {
// set rtm_state to "always rtm" in MDO // set rtm_state to "always rtm" in MDO
mov_metadata(tmpReg, method_data); mov_metadata(tmpReg, method_data);
lock(); lock();
@ -260,7 +260,7 @@ void C2_MacroAssembler::rtm_profiling(Register abort_status_Reg,
Metadata* method_data, Metadata* method_data,
bool profile_rtm) { bool profile_rtm) {
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
// update rtm counters based on rax value at abort // update rtm counters based on rax value at abort
// reads abort_status_Reg, updates flags // reads abort_status_Reg, updates flags
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters)); lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
@ -270,7 +270,7 @@ void C2_MacroAssembler::rtm_profiling(Register abort_status_Reg,
if (RTMRetryCount > 0) { if (RTMRetryCount > 0) {
push(abort_status_Reg); push(abort_status_Reg);
} }
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data); rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
// restore abort status // restore abort status
if (RTMRetryCount > 0) { if (RTMRetryCount > 0) {
@ -356,7 +356,7 @@ void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Regi
// tmpReg, scrReg and flags are killed // tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement); branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
} }
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(stack_rtm_counters != nullptr, "should not be null when profiling RTM");
atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg); atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement); bind(L_noincrement);
} }
@ -416,7 +416,7 @@ void C2_MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, R
// tmpReg, scrReg and flags are killed // tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement); branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
} }
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg); atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement); bind(L_noincrement);
} }
@ -661,7 +661,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
lock(); lock();
cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
// If we weren't able to swing _owner from NULL to the BasicLock // If we weren't able to swing _owner from null to the BasicLock
// then take the slow path. // then take the slow path.
jccb (Assembler::notZero, NO_COUNT); jccb (Assembler::notZero, NO_COUNT);
// update _owner from BasicLock to thread // update _owner from BasicLock to thread


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ int IntelJccErratum::jcc_erratum_taint_node(MachNode* node, PhaseRegAlloc* regal
int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegAlloc* regalloc) { int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegAlloc* regalloc) {
ResourceMark rm; ResourceMark rm;
int nop_size = 0; int nop_size = 0;
MachNode* last_m = NULL; MachNode* last_m = nullptr;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) { for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
const Block* const block = cfg->get_block(i); const Block* const block = cfg->get_block(i);
@ -86,7 +86,7 @@ int IntelJccErratum::tag_affected_machnodes(Compile* C, PhaseCFG* cfg, PhaseRegA
} }
} }
} }
last_m = NULL; last_m = nullptr;
} else { } else {
last_m = m; last_m = m;
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// movq rbx, 0 // movq rbx, 0
// jmp -5 # to self // jmp -5 # to self
if (mark == NULL) { if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section. mark = cbuf.insts_mark(); // Get mark within main instrs section.
} }
@ -50,8 +50,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size()); address base = __ start_a_stub(to_interp_stub_size());
if (base == NULL) { if (base == nullptr) {
return NULL; // CodeBuffer::expand failed. return nullptr; // CodeBuffer::expand failed.
} }
// Static stub relocation stores the instruction address of the call. // Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand); __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
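
The nullptr return above is a contract rather than an error to assert on: start_a_stub() yields nullptr when the code buffer cannot expand, and emit_to_interp_stub() must propagate that to its caller. A minimal model of the shape, with heap allocation standing in for code-buffer space:

    #include <cstddef>
    #include <new>

    // Returns the stub base, or nullptr if space could not be obtained;
    // the caller, not the emitter, decides how to recover.
    inline unsigned char* emit_stub(std::size_t size) {
      unsigned char* base =
          static_cast<unsigned char*>(::operator new(size, std::nothrow));
      if (base == nullptr) {
        return nullptr;   // CodeBuffer::expand failed
      }
      // ... emit instructions into [base, base + size) ...
      return base;
    }
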
@ -82,7 +82,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(); address stub = find_stub();
guarantee(stub != NULL, "stub not found"); guarantee(stub != nullptr, "stub not found");
if (TraceICs) { if (TraceICs) {
ResourceMark rm; ResourceMark rm;
@ -108,7 +108,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call"); assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call");
// Reset stub. // Reset stub.
address stub = static_stub->addr(); address stub = static_stub->addr();
assert(stub != NULL, "stub not found"); assert(stub != nullptr, "stub not found");
// Creation also verifies the object. // Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
method_holder->set_data(0); method_holder->set_data(0);
@ -128,12 +128,12 @@ void CompiledDirectStaticCall::verify() {
#ifdef ASSERT #ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob((address) _call); CodeBlob *cb = CodeCache::find_blob((address) _call);
assert(cb != NULL, "sanity"); assert(cb != nullptr, "sanity");
#endif #endif
// Verify stub. // Verify stub.
address stub = find_stub(); address stub = find_stub();
assert(stub != NULL, "no stub found for static call"); assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object. // Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());


@ -140,7 +140,7 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
|| (f.unextended_sp() == f.sp()), ""); || (f.unextended_sp() == f.sp()), "");
assert(f.fp() > (intptr_t*)f.at(frame::interpreter_frame_initial_sp_offset), ""); assert(f.fp() > (intptr_t*)f.at(frame::interpreter_frame_initial_sp_offset), "");
// at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts // at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts
*hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp(); *hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp();
// Make sure that locals is already relativized. // Make sure that locals is already relativized.


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@
// the perfect job. In those cases, decode_instruction0 may kick in // the perfect job. In those cases, decode_instruction0 may kick in
// and do it right. // and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)" // If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) { static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) {
return here; return here;
} }


@ -71,7 +71,7 @@ public:
_captured_state_mask(captured_state_mask), _captured_state_mask(captured_state_mask),
_frame_complete(0), _frame_complete(0),
_frame_size_slots(0), _frame_size_slots(0),
_oop_maps(NULL) { _oop_maps(nullptr) {
} }
void generate(); void generate();


@ -87,7 +87,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// construct the sender and do some validation of it. This goes a long way // construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code // toward eliminating issues when we get in frame construction code
if (_cb != NULL ) { if (_cb != nullptr ) {
// First check if frame is complete and tester is reliable // First check if frame is complete and tester is reliable
// Unfortunately we can only check frame complete for runtime stubs and nmethod // Unfortunately we can only check frame complete for runtime stubs and nmethod
@ -113,10 +113,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return fp_safe; return fp_safe;
} }
intptr_t* sender_sp = NULL; intptr_t* sender_sp = nullptr;
intptr_t* sender_unextended_sp = NULL; intptr_t* sender_unextended_sp = nullptr;
address sender_pc = NULL; address sender_pc = nullptr;
intptr_t* saved_fp = NULL; intptr_t* saved_fp = nullptr;
if (is_interpreted_frame()) { if (is_interpreted_frame()) {
// fp must be safe // fp must be safe
@ -181,7 +181,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc // We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) { if (sender_pc == nullptr || sender_blob == nullptr) {
return false; return false;
} }
@ -214,7 +214,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) { if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) { nm->method()->is_method_handle_intrinsic()) {
return false; return false;
@ -256,7 +256,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Will the pc we fetch be non-zero (which we'll find at the oldest frame) // Will the pc we fetch be non-zero (which we'll find at the oldest frame)
if ( (address) this->fp()[return_addr_offset] == NULL) return false; if ( (address) this->fp()[return_addr_offset] == nullptr) return false;
// could try and do some more potential verification of native frame if we could think of some... // could try and do some more potential verification of native frame if we could think of some...
@ -284,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
*pc_addr = pc; *pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc _pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching"); assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
_pc = original_pc; _pc = original_pc;
@ -356,7 +356,7 @@ void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
} }
frame frame::sender_for_entry_frame(RegisterMap* map) const { frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C // Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender // frame of that chunk as the sender
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@ -383,11 +383,11 @@ bool frame::upcall_stub_frame_is_first() const {
assert(is_upcall_stub_frame(), "must be optimzed entry frame"); assert(is_upcall_stub_frame(), "must be optimzed entry frame");
UpcallStub* blob = _cb->as_upcall_stub(); UpcallStub* blob = _cb->as_upcall_stub();
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this); JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
return jfa->last_Java_sp() == NULL; return jfa->last_Java_sp() == nullptr;
} }
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
UpcallStub* blob = _cb->as_upcall_stub(); UpcallStub* blob = _cb->as_upcall_stub();
// Java frame called from C; skip all C frames and return top C // Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender // frame of that chunk as the sender
@ -432,9 +432,9 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are // as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites. // returning to any of these call sites.
if (_cb != NULL) { if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) { if (sender_cm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC. // If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) || if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) { sender_cm->is_deopt_mh_entry(_pc)) {
@ -560,7 +560,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
} else { } else {
oop* obj_p = (oop*)tos_addr; oop* obj_p = (oop*)tos_addr;
obj = (obj_p == NULL) ? (oop)NULL : *obj_p; obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
} }
assert(Universe::is_in_heap_or_null(obj), "sanity check"); assert(Universe::is_in_heap_or_null(obj), "sanity check");
*oop_result = obj; *oop_result = obj;
@ -659,10 +659,10 @@ frame::frame(void* sp, void* fp, void* pc) {
void JavaFrameAnchor::make_walkable() { void JavaFrameAnchor::make_walkable() {
// last frame set? // last frame set?
if (last_Java_sp() == NULL) return; if (last_Java_sp() == nullptr) return;
// already walkable? // already walkable?
if (walkable()) return; if (walkable()) return;
vmassert(last_Java_pc() == NULL, "already walkable"); vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1]; _last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong"); vmassert(walkable(), "something went wrong");
} }
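
make_walkable() works because, on x86, the call that last entered the runtime pushed its return pc into the word just below the saved sp, so the anchor stores only sp eagerly and fills in the pc lazily. A standalone model (not HotSpot code):

    #include <cstdint>

    struct JavaFrameAnchorModel {
      intptr_t* last_sp = nullptr;
      void*     last_pc = nullptr;

      bool walkable() const { return last_pc != nullptr; }

      void make_walkable() {
        if (last_sp == nullptr) return;   // no last frame set
        if (walkable()) return;           // already walkable
        // The return pc sits at sp[-1], pushed by the call instruction.
        last_pc = reinterpret_cast<void*>(last_sp[-1]);
      }
    };

Keeping the hot Java-to-VM transition down to a single sp store is the point of this design; the pc read is deferred until someone actually walks the stack.
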


@ -39,13 +39,13 @@
// Constructors: // Constructors:
inline frame::frame() { inline frame::frame() {
_pc = NULL; _pc = nullptr;
_sp = NULL; _sp = nullptr;
_unextended_sp = NULL; _unextended_sp = nullptr;
_fp = NULL; _fp = nullptr;
_cb = NULL; _cb = nullptr;
_deopt_state = unknown; _deopt_state = unknown;
_oop_map = NULL; _oop_map = nullptr;
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
} }
@ -55,11 +55,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_unextended_sp = sp; _unextended_sp = sp;
_fp = fp; _fp = fp;
_pc = pc; _pc = pc;
_oop_map = NULL; _oop_map = nullptr;
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
setup(pc); setup(pc);
} }
@ -68,10 +68,10 @@ inline void frame::setup(address pc) {
adjust_unextended_sp(); adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
_pc = original_pc; _pc = original_pc;
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc), assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)"); "original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else { } else {
if (_cb == SharedRuntime::deopt_blob()) { if (_cb == SharedRuntime::deopt_blob()) {
@ -91,10 +91,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_unextended_sp = unextended_sp; _unextended_sp = unextended_sp;
_fp = fp; _fp = fp;
_pc = pc; _pc = pc;
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = cb; _cb = cb;
_oop_map = NULL; _oop_map = nullptr;
assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc)); assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
@ -115,7 +115,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
// In thaw, non-heap frames use this constructor to pass oop_map. I don't know why. // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
assert(_on_heap || _cb != nullptr, "these frames are always heap frames"); assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
if (cb != NULL) { if (cb != nullptr) {
setup(pc); setup(pc);
} }
#ifdef ASSERT #ifdef ASSERT
@ -132,10 +132,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_unextended_sp = unextended_sp; _unextended_sp = unextended_sp;
_fp = fp; _fp = fp;
_pc = pc; _pc = pc;
assert(pc != NULL, "no pc?"); assert(pc != nullptr, "no pc?");
_cb = CodeCache::find_blob_fast(pc); _cb = CodeCache::find_blob_fast(pc);
_oop_map = NULL; _oop_map = nullptr;
assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp)); assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
_on_heap = false; _on_heap = false;
DEBUG_ONLY(_frame_index = -1;) DEBUG_ONLY(_frame_index = -1;)
@ -160,19 +160,19 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// Then we could use the assert below. However this assert is of somewhat dubious // Then we could use the assert below. However this assert is of somewhat dubious
// value. // value.
// UPDATE: this constructor is only used by trace_method_handle_stub() now. // UPDATE: this constructor is only used by trace_method_handle_stub() now.
// assert(_pc != NULL, "no pc?"); // assert(_pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(_pc); _cb = CodeCache::find_blob(_pc);
adjust_unextended_sp(); adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this); address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != nullptr) {
_pc = original_pc; _pc = original_pc;
_deopt_state = is_deoptimized; _deopt_state = is_deoptimized;
} else { } else {
_deopt_state = not_deoptimized; _deopt_state = not_deoptimized;
} }
_oop_map = NULL; _oop_map = nullptr;
} }
// Accessors // Accessors
@ -187,19 +187,19 @@ inline bool frame::equal(frame other) const {
} }
// Return unique id for this frame. The id must have a value where we can distinguish // Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable) // identity and younger/older relationship. null represents an invalid (incomparable)
// frame. // frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); } inline intptr_t* frame::id(void) const { return unextended_sp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id // Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
return this->id() > id ; } return this->id() > id ; }
inline intptr_t* frame::link() const { return *(intptr_t **)addr_at(link_offset); } inline intptr_t* frame::link() const { return *(intptr_t **)addr_at(link_offset); }
inline intptr_t* frame::link_or_null() const { inline intptr_t* frame::link_or_null() const {
intptr_t** ptr = (intptr_t **)addr_at(link_offset); intptr_t** ptr = (intptr_t **)addr_at(link_offset);
return os::is_readable_pointer(ptr) ? *ptr : NULL; return os::is_readable_pointer(ptr) ? *ptr : nullptr;
} }
inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; } inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
@ -208,7 +208,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); retu
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; } inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }
inline intptr_t* frame::real_fp() const { inline intptr_t* frame::real_fp() const {
if (_cb != NULL) { if (_cb != nullptr) {
// use the frame size if valid // use the frame size if valid
int size = _cb->frame_size(); int size = _cb->frame_size();
if (size > 0) { if (size > 0) {
@ -232,7 +232,7 @@ inline int frame::compiled_frame_stack_argsize() const {
} }
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
assert(mask != NULL, ""); assert(mask != nullptr, "");
Method* m = interpreter_frame_method(); Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci(); int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask); m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
@ -285,7 +285,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
// top of expression stack // top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const { inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp(); intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL) { if (last_sp == nullptr) {
return sp(); return sp();
} else { } else {
// sp() may have been extended or shrunk by an adapter. At least // sp() may have been extended or shrunk by an adapter. At least
@ -323,13 +323,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
inline oop frame::saved_oop_result(RegisterMap* map) const { inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp()); oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location"); guarantee(result_adr != nullptr, "bad register save location");
return *result_adr; return *result_adr;
} }
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp()); oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location"); guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj; *result_adr = obj;
} }
@ -343,17 +343,17 @@ inline int frame::sender_sp_ret_address_offset() {
} }
inline const ImmutableOopMap* frame::get_oop_map() const { inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL; if (_cb == nullptr) return nullptr;
if (_cb->oop_maps() != NULL) { if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc); NativePostCallNop* nop = nativePostCallNop_at(_pc);
if (nop != NULL && nop->displacement() != 0) { if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff); int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc); return _cb->oop_map_for_slot(slot, _pc);
} }
const ImmutableOopMap* oop_map = OopMapSet::find_map(this); const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map; return oop_map;
} }
return NULL; return nullptr;
} }
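Note: the slot lookup above packs an oop-map index into the high byte of the 32-bit displacement carried by a post-call nop. A small self-contained model of that extraction (the layout is taken only from the shift/mask in the code; nothing else is assumed):

#include <cassert>
#include <cstdint>

static int oop_map_slot(int32_t displacement) {
  return (displacement >> 24) & 0xff;   // high byte holds the slot index
}

int main() {
  int32_t disp = (7 << 24) | 0xABCD;    // slot 7; low bits carry other data
  assert(oop_map_slot(disp) == 7);
  assert(oop_map_slot(0) == 0);         // displacement 0: no oop-map hint
}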
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -383,7 +383,7 @@ inline frame frame::sender_raw(RegisterMap* map) const {
if (is_interpreted_frame()) return sender_for_interpreter_frame(map); if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()), "Must be the same"); assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
if (_cb != NULL) return sender_for_compiled_frame(map); if (_cb != nullptr) return sender_for_compiled_frame(map);
// Must be native-compiled frame, i.e. the marshaling code for native // Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system. // methods that exists in the core system.
@ -391,7 +391,7 @@ inline frame frame::sender_raw(RegisterMap* map) const {
} }
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const { inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set"); assert(map != nullptr, "map must be set");
// frame owned by optimizing compiler // frame owned by optimizing compiler
assert(_cb->frame_size() > 0, "must have non-zero frame size"); assert(_cb->frame_size() > 0, "must have non-zero frame size");
@ -412,13 +412,13 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// outside of update_register_map. // outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != NULL) { if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map); _oop_map->update_register_map(this, map);
} }
} else { } else {
assert(!_cb->caller_must_gc_arguments(map->thread()), ""); assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), ""); assert(!map->include_argument_oops(), "");
assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame"); assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
} }
// Since the prolog does the save and restore of EBP there is no oopmap // Since the prolog does the save and restore of EBP there is no oopmap
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -230,7 +230,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
// Calling the runtime using the regular call_VM_leaf mechanism generates // Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base) // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL. // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
// //
// If we are generating the pre-barrier without a frame (e.g. in the // If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to // intrinsified Reference.get() routine) then ebp might be pointing to
@ -291,12 +291,12 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ shrptr(tmp, HeapRegion::LogOfHRGrainBytes); __ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
__ jcc(Assembler::equal, done); __ jcc(Assembler::equal, done);
// crosses regions, storing NULL? // crosses regions, storing null?
__ cmpptr(new_val, NULL_WORD); __ cmpptr(new_val, NULL_WORD);
__ jcc(Assembler::equal, done); __ jcc(Assembler::equal, done);
// storing region crossing non-NULL, is card already dirty? // storing region crossing non-null, is card already dirty?
const Register card_addr = tmp; const Register card_addr = tmp;
const Register cardtable = tmp2; const Register cardtable = tmp2;
@ -316,7 +316,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ jcc(Assembler::equal, done); __ jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean. // storing a region crossing, non-null oop, card is clean.
// dirty card and log. // dirty card and log.
__ movb(Address(card_addr, 0), G1CardTable::dirty_card_val()); __ movb(Address(card_addr, 0), G1CardTable::dirty_card_val());
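Note: a hedged C++ rendering of the post-barrier filter chain the assembly above implements (region size and card value are placeholder assumptions, and the real barrier's fence/re-check and enqueue steps are omitted): skip the slow path when the store stays in one region, stores null, or hits an already-dirty card; otherwise dirty the card.

#include <cstdint>

constexpr int     kRegionSizeLog = 21;  // assumed log2 of the heap region size
constexpr uint8_t kDirtyCard     = 0;   // assumed dirty_card_val()

inline void g1_post_barrier_model(uint8_t* card, const void* store_addr,
                                  const void* new_val) {
  uintptr_t a = reinterpret_cast<uintptr_t>(store_addr);
  uintptr_t b = reinterpret_cast<uintptr_t>(new_val);
  if (((a ^ b) >> kRegionSizeLog) == 0) return;  // same region: done
  if (new_val == nullptr) return;                // storing null: done
  if (*card == kDirtyCard) return;               // card already dirty: done
  *card = kDirtyCard;                            // dirty card (and log it)
}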
@ -519,7 +519,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Label enqueued; Label enqueued;
Label runtime; Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions. // At this point we know new_value is non-null and the new_value crosses regions.
// Must check to see if card is already dirty // Must check to see if card is already dirty
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
@ -549,7 +549,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ cmpb(Address(card_addr, 0), CardTable::dirty_card_val()); __ cmpb(Address(card_addr, 0), CardTable::dirty_card_val());
__ jcc(Assembler::equal, done); __ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean. // storing region crossing non-null, card is clean.
// dirty card and log. // dirty card and log.
__ movb(Address(card_addr, 0), CardTable::dirty_card_val()); __ movb(Address(card_addr, 0), CardTable::dirty_card_val());
View File
@ -382,7 +382,7 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register th
#ifdef _LP64 #ifdef _LP64
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation) { void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) { if (bs_nm == nullptr) {
return; return;
} }
Register thread = r15_thread; Register thread = r15_thread;
@ -396,7 +396,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
uintptr_t after_cmp = (uintptr_t)__ pc(); uintptr_t after_cmp = (uintptr_t)__ pc();
guarantee(after_cmp - before_cmp == 8, "Wrong assumed instruction length"); guarantee(after_cmp - before_cmp == 8, "Wrong assumed instruction length");
if (slow_path != NULL) { if (slow_path != nullptr) {
__ jcc(Assembler::notEqual, *slow_path); __ jcc(Assembler::notEqual, *slow_path);
__ bind(*continuation); __ bind(*continuation);
} else { } else {
@ -409,7 +409,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
#else #else
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, Label*) { void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, Label*) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) { if (bs_nm == nullptr) {
return; return;
} }
@ -430,7 +430,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, La
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) { if (bs == nullptr) {
return; return;
} }
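Note: conceptually, the nmethod entry barrier emitted above compares a guard word embedded in the code against a current global value and takes the slow path when they differ; the 8-byte length guarantee keeps the immediate at a known offset so it can be patched. A rough stand-alone model (the names and the epoch scheme are assumptions, not HotSpot's API):

#include <atomic>

static std::atomic<int> g_guard_epoch{1};

struct FakeNMethod {
  int guard_value;                     // the patched immediate the cmp reads
  bool needs_slow_path() const {
    return guard_value != g_guard_epoch.load(std::memory_order_acquire);
  }
};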
View File
@ -264,7 +264,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
// Calling the runtime using the regular call_VM_leaf mechanism generates // Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base) // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL. // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
// //
// If we are generating the pre-barrier without a frame (e.g. in the // If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to // intrinsified Reference.get() routine) then ebp might be pointing to
@ -703,7 +703,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
// Before reaching to resolve sequence, see if we can avoid the whole shebang // Before reaching to resolve sequence, see if we can avoid the whole shebang
// with filters. // with filters.
// Filter: when offending in-memory value is NULL, the failure is definitely legitimate // Filter: when offending in-memory value is null, the failure is definitely legitimate
__ testptr(oldval, oldval); __ testptr(oldval, oldval);
__ jcc(Assembler::zero, L_failure); __ jcc(Assembler::zero, L_failure);
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86. define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment. define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached
// because // because
// (1) the value is old (i.e., doesn't matter for scavenges) // (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
// assert(cached_value == NULL || cached_oop->is_perm(), "must be perm oop"); // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type)); masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
masm->jump(ExternalAddress(entry_point)); masm->jump(ExternalAddress(entry_point));
} }
View File
@ -268,7 +268,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL"); " last_sp != null");
bind(L); bind(L);
} }
#endif #endif
@ -300,7 +300,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_base:" stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL"); " last_sp != nullptr");
bind(L); bind(L);
} }
#endif /* ASSERT */ #endif /* ASSERT */
@ -399,7 +399,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset())); movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
testptr(tmp, tmp); testptr(tmp, tmp);
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == nullptr) exit;
// Initiate earlyret handling only if it is not already being processed. // Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code // If the flag has the earlyret_processing bit set, it means that this code
@ -1377,7 +1377,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
push(rbx); push(rbx);
get_method(rbx); get_method(rbx);
// Test MDO to avoid the call if it is NULL. // Test MDO to avoid the call if it is null.
movptr(rax, Address(rbx, in_bytes(Method::method_data_offset()))); movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
testptr(rax, rax); testptr(rax, rax);
jcc(Assembler::zero, set_mdp); jcc(Assembler::zero, set_mdp);
@ -1760,7 +1760,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
} }
// In the fall-through case, we found no matching item, but we // In the fall-through case, we found no matching item, but we
// observed that item[start_row] is NULL. // observed that item[start_row] is null.
// Fill in the item field and increment the count. // Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row)); int item_offset = in_bytes(item_offset_fn(start_row));
@ -1776,13 +1776,13 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
// Example state machine code for three profile rows: // Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1] // // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; } // if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != NULL) { // if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1] // // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; } // if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != NULL) { // if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2] // // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; } // if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow // if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done; // row[2].init(rec); goto done;
// } else { // } else {
// // remember row[1] is empty // // remember row[1] is empty
@ -1988,7 +1988,7 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Ad
incrementl(scratch, InvocationCounter::count_increment); incrementl(scratch, InvocationCounter::count_increment);
movl(counter_addr, scratch); movl(counter_addr, scratch);
andl(scratch, mask); andl(scratch, mask);
if (where != NULL) { if (where != nullptr) {
jcc(Assembler::zero, *where); jcc(Assembler::zero, *where);
} }
} }
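Note: a plain C++ rendering of increment_mask_and_jump above (the constants are assumptions for illustration): bump the invocation counter and test it against a mask; when the masked value reaches zero the interpreter takes the overflow path.

#include <cstdint>

constexpr uint32_t kCountIncrement = 2;     // assumed InvocationCounter::count_increment
constexpr uint32_t kMask           = 0x1FE; // assumed profiling threshold mask

inline bool increment_and_check(uint32_t& counter) {
  counter += kCountIncrement;
  return (counter & kMask) == 0;            // true: take the overflow path (*where)
}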
View File
@ -179,7 +179,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void empty_expression_stack() { void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call // null last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
NOT_LP64(empty_FPU_stack()); NOT_LP64(empty_FPU_stack());
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,10 @@ public:
void clear(void) { void clear(void) {
// clearing _last_Java_sp must be first // clearing _last_Java_sp must be first
_last_Java_sp = NULL; _last_Java_sp = nullptr;
// fence? // fence?
_last_Java_fp = NULL; _last_Java_fp = nullptr;
_last_Java_pc = NULL; _last_Java_pc = nullptr;
} }
void copy(JavaFrameAnchor* src) { void copy(JavaFrameAnchor* src) {
@ -50,11 +50,11 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data // We must clear _last_Java_sp before copying the rest of the new data
// //
// Hack Alert: Temporary bugfix for 4717480/4721647 // Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp // To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing // unless the value is changing
// //
if (_last_Java_sp != src->_last_Java_sp) if (_last_Java_sp != src->_last_Java_sp)
_last_Java_sp = NULL; _last_Java_sp = nullptr;
_last_Java_fp = src->_last_Java_fp; _last_Java_fp = src->_last_Java_fp;
_last_Java_pc = src->_last_Java_pc; _last_Java_pc = src->_last_Java_pc;
@ -62,7 +62,7 @@ public:
_last_Java_sp = src->_last_Java_sp; _last_Java_sp = src->_last_Java_sp;
} }
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; }
void make_walkable(); void make_walkable();
intptr_t* last_Java_sp(void) const { return _last_Java_sp; } intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
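Note: a sketch of the publication ordering that clear() and copy() above are careful about (plain C++ atomics standing in for the real fences; field names invented): _last_Java_sp doubles as the validity flag, so it is invalidated first and published last.

#include <atomic>
#include <cstdint>

struct FakeAnchor {
  std::atomic<intptr_t*> last_Java_sp{nullptr};
  intptr_t* last_Java_fp = nullptr;

  void set(intptr_t* sp, intptr_t* fp) {
    last_Java_fp = fp;                                   // fill the rest first
    last_Java_sp.store(sp, std::memory_order_release);   // publish sp last
  }
  void clear() {
    last_Java_sp.store(nullptr, std::memory_order_release); // invalidate first
    last_Java_fp = nullptr;
  }
  bool walkable() const {
    return last_Java_sp.load(std::memory_order_acquire) != nullptr;
  }
};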
View File
@ -51,7 +51,7 @@ GetDoubleField_t JNI_FastGetField::jni_fast_GetDoubleField_fp;
// between loads, which is much more efficient than lfence. // between loads, which is much more efficient than lfence.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name = NULL; const char *name = nullptr;
switch (type) { switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break; case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -128,7 +128,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
__ bind (slow); __ bind (slow);
address slow_case_addr = NULL; address slow_case_addr = nullptr;
switch (type) { switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -264,7 +264,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
} }
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name = NULL; const char *name = nullptr;
switch (type) { switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -343,7 +343,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
__ bind (slow); __ bind (slow);
address slow_case_addr = NULL; address slow_case_addr = nullptr;
switch (type) { switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
View File
@ -49,7 +49,7 @@ static const Register roffset = r10;
static const Register rcounter = r11; static const Register rcounter = r11;
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name = NULL; const char *name = nullptr;
switch (type) { switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break; case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -107,7 +107,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
__ bind (slow); __ bind (slow);
address slow_case_addr = NULL; address slow_case_addr = nullptr;
switch (type) { switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -150,7 +150,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
} }
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name = NULL; const char *name = nullptr;
switch (type) { switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -199,7 +199,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
__ bind (slow); __ bind (slow);
address slow_case_addr = NULL; address slow_case_addr = nullptr;
switch (type) { switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
} }
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, methodHandle& method, jint pc_offset, JVMCI_TRAPS) { void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, methodHandle& method, jint pc_offset, JVMCI_TRAPS) {
NativeCall* call = NULL; NativeCall* call = nullptr;
switch (_next_call_type) { switch (_next_call_type) {
case INLINE_INVOKE: case INLINE_INVOKE:
return; return;
View File
@ -1036,7 +1036,7 @@ void MacroAssembler::object_move(OopMap* map,
Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register(); Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
// See if oop is NULL; if it is we need no handle // See if oop is null; if it is we need no handle
if (src.first()->is_stack()) { if (src.first()->is_stack()) {
@ -1049,12 +1049,12 @@ void MacroAssembler::object_move(OopMap* map,
cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD); cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
lea(rHandle, Address(rbp, reg2offset_in(src.first()))); lea(rHandle, Address(rbp, reg2offset_in(src.first())));
// conditionally move a NULL // conditionally move a null
cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
} else { } else {
// Oop is in a register we must store it to the space we reserve // Oop is in a register we must store it to the space we reserve
// on the stack for oop_handles and pass a handle if oop is non-NULL // on the stack for oop_handles and pass a handle if oop is non-null
const Register rOop = src.first()->as_Register(); const Register rOop = src.first()->as_Register();
int oop_slot; int oop_slot;
@ -1077,7 +1077,7 @@ void MacroAssembler::object_move(OopMap* map,
int offset = oop_slot*VMRegImpl::stack_slot_size; int offset = oop_slot*VMRegImpl::stack_slot_size;
map->set_oop(VMRegImpl::stack2reg(oop_slot)); map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL // Store oop in handle area, may be null
movptr(Address(rsp, offset), rOop); movptr(Address(rsp, offset), rOop);
if (is_receiver) { if (is_receiver) {
*receiver_offset = offset; *receiver_offset = offset;
@ -1085,7 +1085,7 @@ void MacroAssembler::object_move(OopMap* map,
cmpptr(rOop, NULL_WORD); cmpptr(rOop, NULL_WORD);
lea(rHandle, Address(rsp, offset)); lea(rHandle, Address(rsp, offset));
// conditionally move a NULL from the handle area where it was just stored // conditionally move a null from the handle area where it was just stored
cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
} }
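Note: a minimal sketch, in plain C++, of the handlizing rule the two branches above implement (the helper name is invented): native code must receive either a null handle or the address of a slot holding the oop, never a pointer to a slot that merely contains null.

using oop = void*;

inline oop* handlize(oop* slot, oop value) {
  *slot = value;                       // store oop in the handle area (may be null)
  return value == nullptr ? nullptr    // conditionally pass a null handle
                          : slot;      // otherwise the address of the slot
}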
@ -1344,7 +1344,7 @@ void MacroAssembler::ic_call(address entry, jint method_index) {
void MacroAssembler::emit_static_call_stub() { void MacroAssembler::emit_static_call_stub() {
// Static stub relocation also tags the Method* in the code-stream. // Static stub relocation also tags the Method* in the code-stream.
mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time. mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code. // This is recognized as unresolved by relocs/nativeinst/ic code.
jump(RuntimeAddress(pc())); jump(RuntimeAddress(pc()));
} }
@ -1562,7 +1562,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
assert(last_java_sp != rbp, "can't use ebp/rbp"); assert(last_java_sp != rbp, "can't use ebp/rbp");
// Only interpreter should have to set fp // Only interpreter should have to set fp
set_last_Java_frame(java_thread, last_java_sp, rbp, NULL, rscratch1); set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);
// do the call, remove parameters // do the call, remove parameters
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
@ -2854,7 +2854,7 @@ void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratc
void MacroAssembler::null_check(Register reg, int offset) { void MacroAssembler::null_check(Register reg, int offset) {
if (needs_explicit_null_check(offset)) { if (needs_explicit_null_check(offset)) {
// provoke OS NULL exception if reg = NULL by // provoke OS null exception if reg is null by
// accessing M[reg] w/o changing any (non-CC) registers // accessing M[reg] w/o changing any (non-CC) registers
// NOTE: cmpl is plenty here to provoke a segv // NOTE: cmpl is plenty here to provoke a segv
cmpptr(rax, Address(reg, 0)); cmpptr(rax, Address(reg, 0));
@ -2863,7 +2863,7 @@ void MacroAssembler::null_check(Register reg, int offset) {
// testl needs to be implemented first) // testl needs to be implemented first)
} else { } else {
// nothing to do, (later) access of M[reg + offset] // nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL // will provoke OS null exception if reg is null
} }
} }
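Note: the comment above relies on the page at address null being protected, so a load at [reg + offset] with a small non-negative offset faults on its own. A sketch of the decision (page size assumed; the real needs_explicit_null_check also has platform special cases):

#include <cstddef>

constexpr ptrdiff_t kPageSize = 4096;  // assumed OS page size

inline bool needs_explicit_null_check_model(ptrdiff_t offset) {
  // in-page offsets fault inside the protected null page by themselves
  return offset < 0 || offset >= kPageSize;
}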
@ -2874,7 +2874,7 @@ void MacroAssembler::os_breakpoint() {
} }
void MacroAssembler::unimplemented(const char* what) { void MacroAssembler::unimplemented(const char* what) {
const char* buf = NULL; const char* buf = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -3105,7 +3105,7 @@ void MacroAssembler::set_last_Java_frame(Register java_thread,
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
} }
// last_java_pc is optional // last_java_pc is optional
if (last_java_pc != NULL) { if (last_java_pc != nullptr) {
Address java_pc(java_thread, Address java_pc(java_thread,
JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
lea(java_pc, InternalAddress(last_java_pc), rscratch); lea(java_pc, InternalAddress(last_java_pc), rscratch);
@ -3887,7 +3887,7 @@ void MacroAssembler::resolve_jobject(Register value,
assert_different_registers(value, thread, tmp); assert_different_registers(value, thread, tmp);
Label done, tagged, weak_tagged; Label done, tagged, weak_tagged;
testptr(value, value); testptr(value, value);
jcc(Assembler::zero, done); // Use NULL as-is. jcc(Assembler::zero, done); // Use null as-is.
testptr(value, JNIHandles::tag_mask); // Test for tag. testptr(value, JNIHandles::tag_mask); // Test for tag.
jcc(Assembler::notZero, tagged); jcc(Assembler::notZero, tagged);
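Note: a hedged scalar model of the tag dispatch above (the tag width and layout are assumptions): a zero value is used as-is, otherwise the low tag bits are stripped and the oop is loaded from the handle slot.

#include <cstdint>

constexpr uintptr_t kTagMask = 0b11;   // assumed low-bit tag width

inline void* resolve_jobject_model(uintptr_t handle) {
  if (handle == 0) return nullptr;                         // use null as-is
  uintptr_t* slot = reinterpret_cast<uintptr_t*>(handle & ~kTagMask);
  return reinterpret_cast<void*>(*slot);                   // load oop from the handle slot
}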
@ -3921,7 +3921,7 @@ void MacroAssembler::resolve_global_jobject(Register value,
Label done; Label done;
testptr(value, value); testptr(value, value);
jcc(Assembler::zero, done); // Use NULL as-is. jcc(Assembler::zero, done); // Use null as-is.
#ifdef ASSERT #ifdef ASSERT
{ {
@ -4262,7 +4262,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
} }
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
// if (scan->interface() == intf) { // if (scan->interface() == intf) {
// result = (klass + scan->offset() + itable_index); // result = (klass + scan->offset() + itable_index);
// } // }
@ -4320,8 +4320,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register temp_reg, Register temp_reg,
Label& L_success) { Label& L_success) {
Label L_failure; Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr);
check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
bind(L_failure); bind(L_failure);
} }
@ -4344,10 +4344,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label L_fallthrough; Label L_fallthrough;
int label_nulls = 0; int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one null in the batch");
int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset()); int sco_offset = in_bytes(Klass::super_check_offset_offset());
@ -4443,9 +4443,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
Label L_fallthrough; Label L_fallthrough;
int label_nulls = 0; int label_nulls = 0;
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one null in the batch");
// a couple of useful fields in sub_klass: // a couple of useful fields in sub_klass:
int ss_offset = in_bytes(Klass::secondary_supers_offset()); int ss_offset = in_bytes(Klass::secondary_supers_offset());
@ -4501,7 +4501,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
if (set_cond_codes) { if (set_cond_codes) {
// Special hack for the AD files: rdi is guaranteed non-zero. // Special hack for the AD files: rdi is guaranteed non-zero.
assert(!pushed_rdi, "rdi must be left non-NULL"); assert(!pushed_rdi, "rdi must be left non-null");
// Also, the condition codes are properly set Z/NZ on succeed/failure. // Also, the condition codes are properly set Z/NZ on succeed/failure.
} }
@ -4522,12 +4522,12 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
} }
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required"); assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
Label L_fallthrough; Label L_fallthrough;
if (L_fast_path == NULL) { if (L_fast_path == nullptr) {
L_fast_path = &L_fallthrough; L_fast_path = &L_fallthrough;
} else if (L_slow_path == NULL) { } else if (L_slow_path == nullptr) {
L_slow_path = &L_fallthrough; L_slow_path = &L_fallthrough;
} }
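Note: check_klass_subtype_fast_path/slow_path and clinit_barrier above all use the same label-defaulting idiom. A small stand-alone illustration (Label is a stand-in type): a caller may pass nullptr for at most one outcome, and that outcome is routed to a local fall-through label.

#include <cassert>

struct Label {};  // stand-in for MacroAssembler's Label

inline void pick_labels(Label*& L_success, Label*& L_failure,
                        Label& L_fallthrough) {
  int label_nulls = 0;
  if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1 && "at most one null in the batch");
}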
@ -4581,7 +4581,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
push(reg); // pass register argument push(reg); // pass register argument
// Pass register number to verify_oop_subroutine // Pass register number to verify_oop_subroutine
const char* b = NULL; const char* b = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -4651,7 +4651,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
} }
// Pass register number to verify_oop_subroutine // Pass register number to verify_oop_subroutine
const char* b = NULL; const char* b = nullptr;
{ {
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
@ -4720,7 +4720,7 @@ class ControlWord {
case 2: rc = "round up "; break; case 2: rc = "round up "; break;
case 3: rc = "chop "; break; case 3: rc = "chop "; break;
default: default:
rc = NULL; // silence compiler warnings rc = nullptr; // silence compiler warnings
fatal("Unknown rounding control: %d", rounding_control()); fatal("Unknown rounding control: %d", rounding_control());
}; };
// precision control // precision control
@ -4731,7 +4731,7 @@ class ControlWord {
case 2: pc = "53 bits "; break; case 2: pc = "53 bits "; break;
case 3: pc = "64 bits "; break; case 3: pc = "64 bits "; break;
default: default:
pc = NULL; // silence compiler warnings pc = nullptr; // silence compiler warnings
fatal("Unknown precision control: %d", precision_control()); fatal("Unknown precision control: %d", precision_control());
}; };
// flags // flags
@ -4853,7 +4853,7 @@ class FPU_State {
case 3: return "empty"; case 3: return "empty";
} }
ShouldNotReachHere(); ShouldNotReachHere();
return NULL; return nullptr;
} }
void print() const { void print() const {
@ -5189,7 +5189,7 @@ void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
} }
// Used for storing NULLs. // Used for storing nulls.
void MacroAssembler::store_heap_oop_null(Address dst) { void MacroAssembler::store_heap_oop_null(Address dst) {
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
} }
@ -5205,7 +5205,7 @@ void MacroAssembler::store_klass_gap(Register dst, Register src) {
#ifdef ASSERT #ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) { void MacroAssembler::verify_heapbase(const char* msg) {
assert (UseCompressedOops, "should be compressed"); assert (UseCompressedOops, "should be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
if (CheckCompressedOops) { if (CheckCompressedOops) {
Label ok; Label ok;
ExternalAddress src2(CompressedOops::ptrs_base_addr()); ExternalAddress src2(CompressedOops::ptrs_base_addr());
@ -5230,7 +5230,7 @@ void MacroAssembler::encode_heap_oop(Register r) {
verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif #endif
verify_oop_msg(r, "broken oop in encode_heap_oop"); verify_oop_msg(r, "broken oop in encode_heap_oop");
if (CompressedOops::base() == NULL) { if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shrq(r, LogMinObjAlignmentInBytes); shrq(r, LogMinObjAlignmentInBytes);
@ -5255,7 +5255,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register r) {
} }
#endif #endif
verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
if (CompressedOops::base() != NULL) { if (CompressedOops::base() != nullptr) {
subq(r, r12_heapbase); subq(r, r12_heapbase);
} }
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
@ -5279,7 +5279,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
if (dst != src) { if (dst != src) {
movq(dst, src); movq(dst, src);
} }
if (CompressedOops::base() != NULL) { if (CompressedOops::base() != nullptr) {
subq(dst, r12_heapbase); subq(dst, r12_heapbase);
} }
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
@ -5292,7 +5292,7 @@ void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT #ifdef ASSERT
verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif #endif
if (CompressedOops::base() == NULL) { if (CompressedOops::base() == nullptr) {
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes); shlq(r, LogMinObjAlignmentInBytes);
@ -5310,25 +5310,25 @@ void MacroAssembler::decode_heap_oop(Register r) {
void MacroAssembler::decode_heap_oop_not_null(Register r) { void MacroAssembler::decode_heap_oop_not_null(Register r) {
// Note: it will change flags // Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
if (CompressedOops::shift() != 0) { if (CompressedOops::shift() != 0) {
assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes); shlq(r, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) { if (CompressedOops::base() != nullptr) {
addq(r, r12_heapbase); addq(r, r12_heapbase);
} }
} else { } else {
assert (CompressedOops::base() == NULL, "sanity"); assert (CompressedOops::base() == nullptr, "sanity");
} }
} }
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
// Note: it will change flags // Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
@ -5341,12 +5341,12 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
movq(dst, src); movq(dst, src);
} }
shlq(dst, LogMinObjAlignmentInBytes); shlq(dst, LogMinObjAlignmentInBytes);
if (CompressedOops::base() != NULL) { if (CompressedOops::base() != nullptr) {
addq(dst, r12_heapbase); addq(dst, r12_heapbase);
} }
} }
} else { } else {
assert (CompressedOops::base() == NULL, "sanity"); assert (CompressedOops::base() == nullptr, "sanity");
if (dst != src) { if (dst != src) {
movq(dst, src); movq(dst, src);
} }
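Note: a scalar model of the compressed-oop arithmetic the encode/decode routines above emit (base and shift are assumed example values; HotSpot derives them from the heap layout). A null base and/or zero shift lets whole steps be skipped, which is why the assembly branches on CompressedOops::base() and CompressedOops::shift().

#include <cassert>
#include <cstdint>

constexpr uintptr_t kBase  = 0x80000000ULL;  // assumed narrow-oop base
constexpr int       kShift = 3;              // assumed log2(object alignment)

inline uint32_t encode_oop(uintptr_t p) {
  return p == 0 ? 0 : static_cast<uint32_t>((p - kBase) >> kShift);
}
inline uintptr_t decode_oop(uint32_t n) {
  return n == 0 ? 0 : kBase + (static_cast<uintptr_t>(n) << kShift);
}

int main() {
  uintptr_t p = kBase + 1024;
  assert(decode_oop(encode_oop(p)) == p);
  assert(encode_oop(0) == 0 && decode_oop(0) == 0);
}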
@ -5355,7 +5355,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
assert_different_registers(r, tmp); assert_different_registers(r, tmp);
if (CompressedKlassPointers::base() != NULL) { if (CompressedKlassPointers::base() != nullptr) {
mov64(tmp, (int64_t)CompressedKlassPointers::base()); mov64(tmp, (int64_t)CompressedKlassPointers::base());
subq(r, tmp); subq(r, tmp);
} }
@ -5367,7 +5367,7 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
assert_different_registers(src, dst); assert_different_registers(src, dst);
if (CompressedKlassPointers::base() != NULL) { if (CompressedKlassPointers::base() != nullptr) {
mov64(dst, -(int64_t)CompressedKlassPointers::base()); mov64(dst, -(int64_t)CompressedKlassPointers::base());
addq(dst, src); addq(dst, src);
} else { } else {
@ -5390,7 +5390,7 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
shlq(r, LogKlassAlignmentInBytes); shlq(r, LogKlassAlignmentInBytes);
} }
if (CompressedKlassPointers::base() != NULL) { if (CompressedKlassPointers::base() != nullptr) {
mov64(tmp, (int64_t)CompressedKlassPointers::base()); mov64(tmp, (int64_t)CompressedKlassPointers::base());
addq(r, tmp); addq(r, tmp);
} }
@ -5404,13 +5404,13 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
if (CompressedKlassPointers::base() == NULL && if (CompressedKlassPointers::base() == nullptr &&
CompressedKlassPointers::shift() == 0) { CompressedKlassPointers::shift() == 0) {
// The best case scenario is that there is no base or shift. Then it is already // The best case scenario is that there is no base or shift. Then it is already
// a pointer that needs nothing but a register rename. // a pointer that needs nothing but a register rename.
movl(dst, src); movl(dst, src);
} else { } else {
if (CompressedKlassPointers::base() != NULL) { if (CompressedKlassPointers::base() != nullptr) {
mov64(dst, (int64_t)CompressedKlassPointers::base()); mov64(dst, (int64_t)CompressedKlassPointers::base());
} else { } else {
xorq(dst, dst); xorq(dst, dst);
@ -5427,8 +5427,8 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src)
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj); int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
mov_narrow_oop(dst, oop_index, rspec); mov_narrow_oop(dst, oop_index, rspec);
@ -5436,8 +5436,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj); int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
mov_narrow_oop(dst, oop_index, rspec); mov_narrow_oop(dst, oop_index, rspec);
@ -5445,7 +5445,7 @@ void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k); int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index); RelocationHolder rspec = metadata_Relocation::spec(klass_index);
mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5453,7 +5453,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k); int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index); RelocationHolder rspec = metadata_Relocation::spec(klass_index);
mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5461,8 +5461,8 @@ void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj); int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
Assembler::cmp_narrow_oop(dst, oop_index, rspec); Assembler::cmp_narrow_oop(dst, oop_index, rspec);
@ -5470,8 +5470,8 @@ void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != nullptr, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj); int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
Assembler::cmp_narrow_oop(dst, oop_index, rspec); Assembler::cmp_narrow_oop(dst, oop_index, rspec);
@ -5479,7 +5479,7 @@ void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k); int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index); RelocationHolder rspec = metadata_Relocation::spec(klass_index);
Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5487,7 +5487,7 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k); int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index); RelocationHolder rspec = metadata_Relocation::spec(klass_index);
Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
@ -5495,8 +5495,8 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
void MacroAssembler::reinit_heapbase() { void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) { if (UseCompressedOops) {
if (Universe::heap() != NULL) { if (Universe::heap() != nullptr) {
if (CompressedOops::base() == NULL) { if (CompressedOops::base() == nullptr) {
MacroAssembler::xorptr(r12_heapbase, r12_heapbase); MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
} else { } else {
mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
@ -91,9 +91,9 @@ class MacroAssembler: public Assembler {
Address as_Address(AddressLiteral adr); Address as_Address(AddressLiteral adr);
Address as_Address(ArrayAddress adr, Register rscratch); Address as_Address(ArrayAddress adr, Register rscratch);
// Support for NULL-checks // Support for null-checks
// //
// Generates code that causes a NULL OS exception if the content of reg is NULL. // Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the // If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain // offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size). // range (0 <= offset <= page_size).
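Aside, not part of the change: a minimal sketch of how a caller relies on the implicit null check described above, assuming the MacroAssembler::null_check(Register, int) entry point this comment documents; the helper name and register choices are hypothetical.

// Sketch only: load obj.field with an implicit null check.
// For 0 <= offset <= page_size the faulting load itself acts as the
// null check, so null_check() may emit no code at all on that path.
void load_field_sketch(MacroAssembler* masm, Register obj, Register dst, int offset) {
  masm->null_check(obj, offset);             // often a no-op (hardware trap does the work)
  masm->movptr(dst, Address(obj, offset));   // the load that faults if obj is null
}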
@ -119,7 +119,7 @@ class MacroAssembler: public Assembler {
char* disp = (char*) &branch[1]; char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1]; int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
file == NULL ? "<NULL>" : file, line); file == nullptr ? "<null>" : file, line);
*disp = imm8; *disp = imm8;
} else { } else {
int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1]; int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
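Aside: the rel8 patch arithmetic above, restated as a self-contained sketch under the assumed layout of one opcode byte followed by an 8-bit displacement.

#include <cassert>
#include <cstdint>
// The displacement is relative to the first byte after the rel8 field.
static void patch_rel8(uint8_t* branch, uint8_t* target) {
  int8_t* disp = reinterpret_cast<int8_t*>(&branch[1]);
  intptr_t imm8 = target - reinterpret_cast<uint8_t*>(disp + 1);
  assert(-128 <= imm8 && imm8 <= 127);  // mirrors the guarantee() above
  *disp = static_cast<int8_t>(imm8);
}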
@ -377,7 +377,7 @@ class MacroAssembler: public Assembler {
void store_heap_oop(Address dst, Register val, Register tmp1 = noreg, void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0); Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
// Used for storing NULL. All other oop constants should be // Used for storing null. All other oop constants should be
// stored using routines that take a jobject. // stored using routines that take a jobject.
void store_heap_oop_null(Address dst); void store_heap_oop_null(Address dst);
@ -385,7 +385,7 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register dst, Register src); void store_klass_gap(Register dst, Register src);
// This dummy is to prevent a call to store_heap_oop from // This dummy is to prevent a call to store_heap_oop from
// converting a zero (like NULL) into a Register by giving // converting a zero (like null) into a Register by giving
// the compiler two choices it can't resolve // the compiler two choices it can't resolve
void store_heap_oop(Address dst, void* dummy); void store_heap_oop(Address dst, void* dummy);
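Aside: a standalone illustration of the overload trick described above, assuming (as historically true on this port) that Register is a pointer type, so the literal 0 converts equally well to Register and to void*; the simplified signatures are hypothetical.

struct RegisterImpl; typedef RegisterImpl* Register;  // simplified stand-in
void store_heap_oop(int dst, Register val);   // the real overload
void store_heap_oop(int dst, void* dummy);    // decoy overload
// store_heap_oop(x, 0);  // error: ambiguous, so a literal null cannot slip
//                        // through; callers must use store_heap_oop_null()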
@ -610,7 +610,7 @@ public:
// Test sub_klass against super_klass, with fast and slow paths. // Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow. // The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through. // One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass. // If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg. // No registers are killed, except temp_reg.
void check_klass_subtype_fast_path(Register sub_klass, void check_klass_subtype_fast_path(Register sub_klass,
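Aside: a hedged sketch of the tri-state convention: one of the three labels may be passed as nullptr, and that outcome falls through. Register choices are hypothetical; __ stands for the enclosing MacroAssembler, as elsewhere in this code.

Label L_success, L_slow;
__ check_klass_subtype_fast_path(rdx /* sub_klass */, rax /* super_klass */,
                                 rcx /* temp_reg */,
                                 &L_success,  // "yes": subtype proven
                                 nullptr,     // "no": falls through below
                                 &L_slow);    // "maybe": defer to the slow path
// reaching here means the fast path answered "no"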
@ -643,8 +643,8 @@ public:
void clinit_barrier(Register klass, void clinit_barrier(Register klass,
Register thread, Register thread,
Label* L_fast_path = NULL, Label* L_fast_path = nullptr,
Label* L_slow_path = NULL); Label* L_slow_path = nullptr);
// method handles (JSR 292) // method handles (JSR 292)
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -231,14 +231,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument. // They all allow an appendix argument.
__ hlt(); // empty stubs make SG sick __ hlt(); // empty stubs make SG sick
return NULL; return nullptr;
} }
// No need in interpreter entry for linkToNative for now. // No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c. // Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) { if (iid == vmIntrinsics::_linkToNative) {
__ hlt(); __ hlt();
return NULL; return nullptr;
} }
// rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
@ -521,8 +521,8 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* saved_regs, intptr_t* saved_regs,
intptr_t* entry_sp) { intptr_t* entry_sp) {
// called as a leaf from native code: do not block the JVM! // called as a leaf from native code: do not block the JVM!
bool has_mh = (strstr(adaptername, "/static") == NULL && bool has_mh = (strstr(adaptername, "/static") == nullptr &&
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH strstr(adaptername, "linkTo") == nullptr); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx"; const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
log_info(methodhandles)("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT, adaptername, mh_reg_name, p2i(mh), p2i(entry_sp)); log_info(methodhandles)("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT, adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));
@ -584,7 +584,7 @@ void trace_method_handle_stub(const char* adaptername,
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?"); assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = cur_frame; frame trace_calling_frame = cur_frame;
while (trace_calling_frame.fp() < saved_regs) { while (trace_calling_frame.fp() < saved_regs) {
assert(trace_calling_frame.cb() == NULL, "not a C frame"); assert(trace_calling_frame.cb() == nullptr, "not a C frame");
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame); trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
} }
assert(trace_calling_frame.sp() < saved_regs, "wrong frame"); assert(trace_calling_frame.sp() < saved_regs, "wrong frame");
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -220,7 +220,7 @@ void NativeCall::insert(address code_pos, address entry) {
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
assert(Patching_lock->is_locked() || assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
assert (instr_addr != NULL, "illegal address for code patching"); assert (instr_addr != nullptr, "illegal address for code patching");
NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned"); guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
@ -616,7 +616,7 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
// (spinlock). Then patches the last byte, and then atomically replaces // (spinlock). Then patches the last byte, and then atomically replaces
// the jmp's with the first 4 byte of the new instruction. // the jmp's with the first 4 byte of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) { void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
assert (instr_addr != NULL, "illegal address for code patching (4)"); assert (instr_addr != nullptr, "illegal address for code patching (4)");
NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
// Temporary code // Temporary code
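Aside: the ordering the comment above describes, as an illustrative-only C++20 sketch (std::atomic_ref assumed available); the real code additionally handles alignment, wordsize, and instruction-cache concerns.

#include <atomic>
#include <cstdint>
#include <cstring>
void replace_jmp_mt_safe_sketch(uint8_t* instr_addr, const uint8_t* code_buffer) {
  // 1. Park concurrent executors on a self jump: EB FE encodes "jmp -2".
  std::atomic_ref<uint16_t>(*reinterpret_cast<uint16_t*>(instr_addr))
      .store(0xFEEB, std::memory_order_release);  // little-endian EB, FE
  // 2. Patch the trailing byte of the 5-byte jump while threads spin.
  instr_addr[4] = code_buffer[4];
  // 3. Atomically publish the first 4 bytes, releasing the spinners.
  uint32_t head;
  std::memcpy(&head, code_buffer, sizeof(head));
  std::atomic_ref<uint32_t>(*reinterpret_cast<uint32_t*>(instr_addr))
      .store(head, std::memory_order_release);
}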
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -745,7 +745,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) { if (nop->check()) {
return nop; return nop;
} }
return NULL; return nullptr;
} }
inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) { inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) {
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,13 +35,13 @@ address RegisterMap::pd_location(VMReg reg) const {
intptr_t offset_in_bytes = (reg->value() - base_reg->value()) * VMRegImpl::stack_slot_size; intptr_t offset_in_bytes = (reg->value() - base_reg->value()) * VMRegImpl::stack_slot_size;
if (base_reg_enc > 15) { if (base_reg_enc > 15) {
if (offset_in_bytes == 0) { if (offset_in_bytes == 0) {
return NULL; // ZMM16-31 are stored in full. return nullptr; // ZMM16-31 are stored in full.
} }
} else { } else {
if (offset_in_bytes == 0 || offset_in_bytes == 16 || offset_in_bytes == 32) { if (offset_in_bytes == 0 || offset_in_bytes == 16 || offset_in_bytes == 32) {
// Reads of the low and high 16 byte parts should be handled by location itself because // Reads of the low and high 16 byte parts should be handled by location itself because
// they have separate callee saved entries (see RegisterSaver::save_live_registers()). // they have separate callee saved entries (see RegisterSaver::save_live_registers()).
return NULL; return nullptr;
} }
// The upper part of YMM0-15 and ZMM0-15 registers are saved separately in the frame. // The upper part of YMM0-15 and ZMM0-15 registers are saved separately in the frame.
if (offset_in_bytes > 32) { if (offset_in_bytes > 32) {
@ -55,11 +55,11 @@ address RegisterMap::pd_location(VMReg reg) const {
} }
} }
address base_location = location(base_reg, nullptr); address base_location = location(base_reg, nullptr);
if (base_location != NULL) { if (base_location != nullptr) {
return base_location + offset_in_bytes; return base_location + offset_in_bytes;
} }
} }
return NULL; return nullptr;
} }
address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const { address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -88,7 +88,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
address Relocation::pd_call_destination(address orig_addr) { address Relocation::pd_call_destination(address orig_addr) {
intptr_t adj = 0; intptr_t adj = 0;
if (orig_addr != NULL) { if (orig_addr != nullptr) {
// We just moved this call instruction from orig_addr to addr(). // We just moved this call instruction from orig_addr to addr().
// This means its target will appear to have grown by addr() - orig_addr. // This means its target will appear to have grown by addr() - orig_addr.
adj = -( addr() - orig_addr ); adj = -( addr() - orig_addr );
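Aside, a worked instance of that adjustment: if the call was emitted at orig_addr = 0x1000 and has been copied to addr() = 0x1800, a pc-relative target decoded at the new location reads 0x800 too high, and adding adj = -(0x1800 - 0x1000) = -0x800 recovers the original destination.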
@ -104,7 +104,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return (address) ((NativeMovConstReg*)ni)->data(); return (address) ((NativeMovConstReg*)ni)->data();
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
return NULL; return nullptr;
} }
} }
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -109,7 +109,7 @@ void OptoRuntime::generate_exception_blob() {
// registers of the frame being removed. // registers of the frame being removed.
// //
__ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
__ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg); __ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
@ -745,22 +745,22 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ movptr(rax, Address(rsp, 0)); __ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls && if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { (Interpreter::code() != nullptr || StubRoutines::code1() != nullptr)) {
// So, let's test for cascading c2i/i2c adapters right now. // So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) || // assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr), // StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame"); // "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { "); __ block_comment("verify_i2c { ");
Label L_ok; Label L_ok;
if (Interpreter::code() != NULL) if (Interpreter::code() != nullptr)
range_check(masm, rax, rdi, range_check(masm, rax, rdi,
Interpreter::code()->code_start(), Interpreter::code()->code_end(), Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok); L_ok);
if (StubRoutines::code1() != NULL) if (StubRoutines::code1() != nullptr)
range_check(masm, rax, rdi, range_check(masm, rax, rdi,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok); L_ok);
if (StubRoutines::code2() != NULL) if (StubRoutines::code2() != nullptr)
range_check(masm, rax, rdi, range_check(masm, rax, rdi,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok); L_ok);
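Aside: the shape of the range_check helper used above, as a hedged sketch (the actual static helper in this file may differ in detail): branch to L_ok when code_start < pc_reg < code_end, clobbering temp.

static void range_check_sketch(MacroAssembler* masm, Register pc_reg, Register temp,
                               address code_start, address code_end, Label& L_ok) {
  Label L_fail;
  masm->lea(temp, ExternalAddress(code_start));
  masm->cmpptr(pc_reg, temp);
  masm->jcc(Assembler::belowEqual, L_fail);  // pc <= start: out of range
  masm->lea(temp, ExternalAddress(code_end));
  masm->cmpptr(pc_reg, temp);
  masm->jcc(Assembler::below, L_ok);         // pc < end: in range
  masm->bind(L_fail);
}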
@ -975,7 +975,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, VMRegPair *regs,
VMRegPair *regs2, VMRegPair *regs2,
int total_args_passed) { int total_args_passed) {
assert(regs2 == NULL, "not needed on x86"); assert(regs2 == nullptr, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all // We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots. // the arguments NOT counting out_preserve_stack_slots.
@ -1327,10 +1327,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word, stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1), in_ByteSize(-1),
in_ByteSize(-1), in_ByteSize(-1),
(OopMapSet*)NULL); (OopMapSet*)nullptr);
} }
address native_func = method->native_function(); address native_func = method->native_function();
assert(native_func != NULL, "must have function"); assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static) // An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
@ -1346,7 +1346,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL; BasicType* in_elem_bt = nullptr;
int argc = 0; int argc = 0;
out_sig_bt[argc++] = T_ADDRESS; out_sig_bt[argc++] = T_ADDRESS;
@ -1361,7 +1361,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space // Now figure out where the args must be stored and how much stack space
// they require. // they require.
int out_arg_slots; int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in // Compute framesize for the wrapper. We need to handlize all oops in
// registers a max of 2 on x86. // registers a max of 2 on x86.
@ -1487,7 +1487,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */); bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
// Frame is now completed as far as size and linkage. // Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start; int frame_complete = ((intptr_t)__ pc()) - start;
@ -2073,7 +2073,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 1536, 1024); CodeBuffer buffer("deopt_blob", 1536, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words; int frame_size_in_words;
OopMap* map = NULL; OopMap* map = nullptr;
// Account for the extra args we place on the stack // Account for the extra args we place on the stack
// by the time we call fetch_unroll_info // by the time we call fetch_unroll_info
const int additional_words = 2; // deopt kind, thread const int additional_words = 2; // deopt kind, thread
@ -2202,7 +2202,7 @@ void SharedRuntime::generate_deopt_blob() {
__ get_thread(rcx); __ get_thread(rcx);
__ push(rcx); __ push(rcx);
// fetch_unroll_info needs to call last_java_frame() // fetch_unroll_info needs to call last_java_frame()
__ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg); __ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
@ -2350,7 +2350,7 @@ void SharedRuntime::generate_deopt_blob() {
__ push(rcx); __ push(rcx);
// set last_Java_sp, last_Java_fp // set last_Java_sp, last_Java_fp
__ set_last_Java_frame(rcx, noreg, rbp, NULL, noreg); __ set_last_Java_frame(rcx, noreg, rbp, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry // Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should // crud. We cannot block on this call, no GC can happen. Call should
@ -2447,7 +2447,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// set last_Java_sp // set last_Java_sp
__ get_thread(rdx); __ get_thread(rdx);
__ set_last_Java_frame(rdx, noreg, noreg, NULL, noreg); __ set_last_Java_frame(rdx, noreg, noreg, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry // Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should // crud. We cannot block on this call, no GC can happen. Call should
@ -2559,7 +2559,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// set last_Java_sp, last_Java_fp // set last_Java_sp, last_Java_fp
__ get_thread(rdi); __ get_thread(rdi);
__ set_last_Java_frame(rdi, noreg, rbp, NULL, noreg); __ set_last_Java_frame(rdi, noreg, rbp, nullptr, noreg);
// Call C code. Need thread but NOT official VM entry // Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should // crud. We cannot block on this call, no GC can happen. Call should
@ -2599,7 +2599,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
const int additional_words = 1; const int additional_words = 1;
int frame_size_in_words; int frame_size_in_words;
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
ResourceMark rm; ResourceMark rm;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
@ -2612,7 +2612,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
const Register java_thread = rdi; // callee-saved for VC++ const Register java_thread = rdi; // callee-saved for VC++
address start = __ pc(); address start = __ pc();
address call_pc = NULL; address call_pc = nullptr;
bool cause_return = (poll_type == POLL_AT_RETURN); bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP); bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
@ -2641,7 +2641,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// Push thread argument and setup last_Java_sp // Push thread argument and setup last_Java_sp
__ get_thread(java_thread); __ get_thread(java_thread);
__ push(java_thread); __ push(java_thread);
__ set_last_Java_frame(java_thread, noreg, noreg, NULL, noreg); __ set_last_Java_frame(java_thread, noreg, noreg, nullptr, noreg);
// if this was not a poll_return then we need to correct the return address now. // if this was not a poll_return then we need to correct the return address now.
if (!cause_return) { if (!cause_return) {
@ -2754,7 +2754,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args. // must do any gc of the args.
// //
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code // allocate space for the code
ResourceMark rm; ResourceMark rm;
@ -2768,7 +2768,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
extra_words }; extra_words };
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL; OopMap* map = nullptr;
int start = __ offset(); int start = __ offset();
@ -2780,7 +2780,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ get_thread(rdi); __ get_thread(rdi);
__ push(thread); __ push(thread);
__ set_last_Java_frame(thread, noreg, rbp, NULL, noreg); __ set_last_Java_frame(thread, noreg, rbp, nullptr, noreg);
__ call(RuntimeAddress(destination)); __ call(RuntimeAddress(destination));
@ -796,7 +796,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// caller, but with an uncorrected stack, causing delayed havoc. // caller, but with an uncorrected stack, causing delayed havoc.
if (VerifyAdapterCalls && if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { (Interpreter::code() != nullptr || StubRoutines::code1() != nullptr)) {
// So, let's test for cascading c2i/i2c adapters right now. // So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) || // assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr), // StubRoutines::contains($return_addr),
@ -805,15 +805,15 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// Pick up the return address // Pick up the return address
__ movptr(rax, Address(rsp, 0)); __ movptr(rax, Address(rsp, 0));
Label L_ok; Label L_ok;
if (Interpreter::code() != NULL) if (Interpreter::code() != nullptr)
range_check(masm, rax, r11, range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(), Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok); L_ok);
if (StubRoutines::code1() != NULL) if (StubRoutines::code1() != nullptr)
range_check(masm, rax, r11, range_check(masm, rax, r11,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok); L_ok);
if (StubRoutines::code2() != NULL) if (StubRoutines::code2() != nullptr)
range_check(masm, rax, r11, range_check(masm, rax, r11,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok); L_ok);
@ -1014,7 +1014,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_entry = __ pc(); address c2i_entry = __ pc();
// Class initialization barrier for static methods // Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL; address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) { if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier; Label L_skip_barrier;
Register method = rbx; Register method = rbx;
@ -1048,7 +1048,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, VMRegPair *regs,
VMRegPair *regs2, VMRegPair *regs2,
int total_args_passed) { int total_args_passed) {
assert(regs2 == NULL, "not needed on x86"); assert(regs2 == nullptr, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all // We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots. // the arguments NOT counting out_preserve_stack_slots.
@ -1761,10 +1761,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots / VMRegImpl::slots_per_word, stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1), in_ByteSize(-1),
in_ByteSize(-1), in_ByteSize(-1),
(OopMapSet*)NULL); nullptr);
} }
address native_func = method->native_function(); address native_func = method->native_function();
assert(native_func != NULL, "must have function"); assert(native_func != nullptr, "must have function");
// An OopMap for lock (and class if static) // An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
@ -1781,7 +1781,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL; BasicType* in_elem_bt = nullptr;
int argc = 0; int argc = 0;
out_sig_bt[argc++] = T_ADDRESS; out_sig_bt[argc++] = T_ADDRESS;
@ -1796,7 +1796,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space // Now figure out where the args must be stored and how much stack space
// they require. // they require.
int out_arg_slots; int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); out_arg_slots = c_calling_convention(out_sig_bt, out_regs, nullptr, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in // Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers // incoming registers
@ -1926,7 +1926,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
// native wrapper is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub // native wrapper is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */); bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
// Frame is now completed as far as size and linkage. // Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start; int frame_complete = ((intptr_t)__ pc()) - start;
@ -2524,7 +2524,7 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer("deopt_blob", 2560+pad, 1024); CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words; int frame_size_in_words;
OopMap* map = NULL; OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
// ------------- // -------------
@ -2602,7 +2602,7 @@ void SharedRuntime::generate_deopt_blob() {
// Save everything in sight. // Save everything in sight.
RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true); RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
// fetch_unroll_info needs to call last_java_frame() // fetch_unroll_info needs to call last_java_frame()
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
__ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset()))); __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
__ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1); __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
@ -2684,7 +2684,7 @@ void SharedRuntime::generate_deopt_blob() {
// fetch_unroll_info needs to call last_java_frame(). // fetch_unroll_info needs to call last_java_frame().
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
#ifdef ASSERT #ifdef ASSERT
{ Label L; { Label L;
__ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); __ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
@ -2717,7 +2717,7 @@ void SharedRuntime::generate_deopt_blob() {
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending? __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException); __ jcc(Assembler::notEqual, noException);
__ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
// QQQ this is useless it was NULL above // QQQ this is useless it was null above
__ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
__ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD); __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD); __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
@ -2903,7 +2903,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// runtime expects it. // runtime expects it.
__ movl(c_rarg1, j_rarg0); __ movl(c_rarg1, j_rarg0);
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
// Call C code. Need thread but NOT official VM entry // Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should // crud. We cannot block on this call, no GC can happen. Call should
@ -3061,7 +3061,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// and setup oopmap. // and setup oopmap.
// //
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
assert(StubRoutines::forward_exception_entry() != NULL, assert(StubRoutines::forward_exception_entry() != nullptr,
"must be generated before"); "must be generated before");
ResourceMark rm; ResourceMark rm;
@ -3073,7 +3073,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
address start = __ pc(); address start = __ pc();
address call_pc = NULL; address call_pc = nullptr;
int frame_size_in_words; int frame_size_in_words;
bool cause_return = (poll_type == POLL_AT_RETURN); bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_wide_vectors = (poll_type == POLL_AT_VECTOR_LOOP); bool save_wide_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
@ -3097,7 +3097,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// address of the call in order to generate an oopmap. Hence, we do all the // address of the call in order to generate an oopmap. Hence, we do all the
// work ourselves. // work ourselves.
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next: __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
// The return address must always be correct so that frame constructor never // The return address must always be correct so that frame constructor never
// sees an invalid pc. // sees an invalid pc.
@ -3227,7 +3227,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args. // must do any gc of the args.
// //
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
// allocate space for the code // allocate space for the code
ResourceMark rm; ResourceMark rm;
@ -3238,7 +3238,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
int frame_size_in_words; int frame_size_in_words;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL; OopMap* map = nullptr;
int start = __ offset(); int start = __ offset();
@ -3247,7 +3247,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
int frame_complete = __ offset(); int frame_complete = __ offset();
__ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
__ mov(c_rarg0, r15_thread); __ mov(c_rarg0, r15_thread);
@ -364,7 +364,7 @@ class StubGenerator: public StubCodeGenerator {
ExternalAddress((address)__FILE__), noreg); ExternalAddress((address)__FILE__), noreg);
__ movl(Address(rcx, Thread::exception_line_offset()), __LINE__ ); __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__ );
// complete return to VM // complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); assert(StubRoutines::_call_stub_return_address != nullptr, "_call_stub_return_address must have been generated before");
__ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
return start; return start;
@ -970,7 +970,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable' // make sure object is 'reasonable'
__ movptr(rax, Address(rsp, 4 * wordSize)); // get object __ movptr(rax, Address(rsp, 4 * wordSize)); // get object
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is ok __ jcc(Assembler::zero, exit); // if obj is null it is ok
// Check if the oop is in the right area of memory // Check if the oop is in the right area of memory
const int oop_mask = Universe::verify_oop_mask(); const int oop_mask = Universe::verify_oop_mask();
@ -983,7 +983,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure klass is 'reasonable', which is not zero. // make sure klass is 'reasonable', which is not zero.
__ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken __ jcc(Assembler::zero, error); // if klass is null it is broken
// return if everything seems ok // return if everything seems ok
__ bind(exit); __ bind(exit);
@ -1109,7 +1109,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(to , Address(rsp, 12+ 8)); __ movptr(to , Address(rsp, 12+ 8));
__ movl(count, Address(rsp, 12+ 12)); __ movl(count, Address(rsp, 12+ 12));
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); // Entry point from conjoint arraycopy stub. *entry = __ pc(); // Entry point from conjoint arraycopy stub.
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
@ -1286,7 +1286,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(dst , Address(rsp, 12+ 8)); // to __ movptr(dst , Address(rsp, 12+ 8)); // to
__ movl2ptr(count, Address(rsp, 12+12)); // count __ movl2ptr(count, Address(rsp, 12+12)); // count
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); // Entry point from generic arraycopy stub. *entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
@ -1544,13 +1544,13 @@ class StubGenerator: public StubCodeGenerator {
Label L_fallthrough; Label L_fallthrough;
#define LOCAL_JCC(assembler_con, label_ptr) \ #define LOCAL_JCC(assembler_con, label_ptr) \
if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \ if (label_ptr != nullptr) __ jcc(assembler_con, *(label_ptr)); \
else __ jcc(assembler_con, L_fallthrough) /*omit semi*/ else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
// The following is a strange variation of the fast path which requires // The following is a strange variation of the fast path which requires
// one less register, because needed values are on the argument stack. // one less register, because needed values are on the argument stack.
// __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp, // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
// L_success, L_failure, NULL); // L_success, L_failure, null);
assert_different_registers(sub_klass, temp); assert_different_registers(sub_klass, temp);
int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
@ -1579,8 +1579,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(L_fallthrough); __ bind(L_fallthrough);
if (L_success == NULL) { BLOCK_COMMENT("L_success:"); } if (L_success == nullptr) { BLOCK_COMMENT("L_success:"); }
if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); } if (L_failure == nullptr) { BLOCK_COMMENT("L_failure:"); }
#undef LOCAL_JCC #undef LOCAL_JCC
} }
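Aside: LOCAL_JCC is an instance of a general optional-label idiom; the same logic as a function-style sketch (hypothetical helper, not in this file).

static void jcc_or_fallthrough(MacroAssembler* masm, Assembler::Condition cc,
                               Label* label_ptr, Label& L_fallthrough) {
  if (label_ptr != nullptr) {
    masm->jcc(cc, *label_ptr);      // caller supplied an explicit target
  } else {
    masm->jcc(cc, L_fallthrough);   // nullptr means branch to the local join point
  }
}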
@ -1634,7 +1634,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(to, to_arg); __ movptr(to, to_arg);
__ movl2ptr(length, length_arg); __ movl2ptr(length, length_arg);
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); // Entry point from generic arraycopy stub. *entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
@ -1702,7 +1702,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(elem_klass, elem_klass_addr); // query the object klass __ movptr(elem_klass, elem_klass_addr); // query the object klass
generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
&L_store_element, NULL); &L_store_element, nullptr);
// (On fall-through, we have failed the element type check.) // (On fall-through, we have failed the element type check.)
// ======== end loop ======== // ======== end loop ========
@ -1909,7 +1909,7 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative. // (2) src_pos must not be negative.
// (3) dst_pos must not be negative. // (3) dst_pos must not be negative.
// (4) length must not be negative. // (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL. // (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays. // (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src. // (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst. // (8) dst_pos + length must not exceed length of dst.
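Aside: the argument checks enumerated above, restated as a C-level sketch; the helper is hypothetical and uses plain pointers in place of oops, while the stub emits the same tests in the assembly that follows, answering -1 to request the slow path.

static int generic_arraycopy_precheck(void* src, int src_pos,
                                      void* dst, int dst_pos, int length,
                                      void* src_klass, void* dst_klass) {
  if (src == nullptr || dst == nullptr) return -1;               // null arrays
  if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;       // (2)-(4)
  if (src_klass == nullptr || src_klass != dst_klass) return -1; // (5), simplified
  return 0;  // (6)-(8) need array lengths and layout info, elided here
}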
@ -1921,7 +1921,7 @@ class StubGenerator: public StubCodeGenerator {
const Register dst_pos = rdi; const Register dst_pos = rdi;
const Register length = rcx; // transfer count const Register length = rcx; // transfer count
// if (src == NULL) return -1; // if (src == nullptr) return -1;
__ movptr(src, SRC); // src oop __ movptr(src, SRC); // src oop
__ testptr(src, src); __ testptr(src, src);
__ jccb(Assembler::zero, L_failed_0); __ jccb(Assembler::zero, L_failed_0);
@ -1931,7 +1931,7 @@ class StubGenerator: public StubCodeGenerator {
__ testl(src_pos, src_pos); __ testl(src_pos, src_pos);
__ jccb(Assembler::negative, L_failed_0); __ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1; // if (dst == nullptr) return -1;
__ movptr(dst, DST); // dst oop __ movptr(dst, DST); // dst oop
__ testptr(dst, dst); __ testptr(dst, dst);
__ jccb(Assembler::zero, L_failed_0); __ jccb(Assembler::zero, L_failed_0);
@ -1946,18 +1946,18 @@ class StubGenerator: public StubCodeGenerator {
__ testl(length, length); __ testl(length, length);
__ jccb(Assembler::negative, L_failed_0); __ jccb(Assembler::negative, L_failed_0);
// if (src->klass() == NULL) return -1; // if (src->klass() == nullptr) return -1;
Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
const Register rcx_src_klass = rcx; // array klass const Register rcx_src_klass = rcx; // array klass
__ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
#ifdef ASSERT #ifdef ASSERT
// assert(src->klass() != NULL); // assert(src->klass() != nullptr);
BLOCK_COMMENT("assert klasses not null"); BLOCK_COMMENT("assert klasses not null");
{ Label L1, L2; { Label L1, L2;
__ testptr(rcx_src_klass, rcx_src_klass); __ testptr(rcx_src_klass, rcx_src_klass);
__ jccb(Assembler::notZero, L2); // it is broken if klass is NULL __ jccb(Assembler::notZero, L2); // it is broken if klass is null
__ bind(L1); __ bind(L1);
__ stop("broken null klass"); __ stop("broken null klass");
__ bind(L2); __ bind(L2);
@ -2130,7 +2130,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_fail_array_check; Label L_fail_array_check;
generate_type_check(rbx_src_klass, generate_type_check(rbx_src_klass,
super_check_offset_addr, dst_klass_addr, super_check_offset_addr, dst_klass_addr,
rdi_temp, NULL, &L_fail_array_check); rdi_temp, nullptr, &L_fail_array_check);
// (On fall-through, we have passed the array type check.) // (On fall-through, we have passed the array type check.)
__ pop(rbx); __ pop(rbx);
__ jmp(L_plain_copy); __ jmp(L_plain_copy);
@ -2194,7 +2194,7 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jbyte_disjoint_arraycopy"); "arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_arrayof_jbyte_arraycopy =
generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
NULL, "arrayof_jbyte_arraycopy"); nullptr, "arrayof_jbyte_arraycopy");
StubRoutines::_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy =
generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
"jbyte_disjoint_arraycopy"); "jbyte_disjoint_arraycopy");
@ -2207,7 +2207,7 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jshort_disjoint_arraycopy"); "arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_arrayof_jshort_arraycopy =
generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
NULL, "arrayof_jshort_arraycopy"); nullptr, "arrayof_jshort_arraycopy");
StubRoutines::_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy =
generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
"jshort_disjoint_arraycopy"); "jshort_disjoint_arraycopy");
@ -2236,7 +2236,7 @@ class StubGenerator: public StubCodeGenerator {
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit =
generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
NULL, "oop_arraycopy_uninit", nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy =
@ -2265,7 +2265,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_checkcast_arraycopy = StubRoutines::_checkcast_arraycopy =
generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = StubRoutines::_checkcast_arraycopy_uninit =
generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, /*dest_uninitialized*/true);
StubRoutines::_unsafe_arraycopy = StubRoutines::_unsafe_arraycopy =
generate_unsafe_copy("unsafe_arraycopy", generate_unsafe_copy("unsafe_arraycopy",
@ -3924,7 +3924,7 @@ class StubGenerator: public StubCodeGenerator {
} }
// Set up last_Java_sp and last_Java_fp // Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, NULL, noreg); __ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg);
// Call runtime // Call runtime
BLOCK_COMMENT("call runtime_entry"); BLOCK_COMMENT("call runtime_entry");
@ -4231,7 +4231,7 @@ class StubGenerator: public StubCodeGenerator {
} }
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) { if (bs_nm != nullptr) {
StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier(); StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
} }
} }
@ -4251,7 +4251,7 @@ class StubGenerator: public StubCodeGenerator {
#define UCM_TABLE_MAX_ENTRIES 16 #define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, int phase) { void StubGenerator_generate(CodeBuffer* code, int phase) {
if (UnsafeCopyMemory::_table == NULL) { if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES); UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
} }
StubGenerator g(code, phase); StubGenerator g(code, phase);
@ -459,7 +459,7 @@ address StubGenerator::generate_catch_exception() {
__ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
// complete return to VM // complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, assert(StubRoutines::_call_stub_return_address != nullptr,
"_call_stub_return_address must have been generated before"); "_call_stub_return_address must have been generated before");
__ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
@ -1091,7 +1091,7 @@ address StubGenerator::generate_verify_oop() {
// make sure object is 'reasonable' // make sure object is 'reasonable'
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK __ jcc(Assembler::zero, exit); // if obj is null it is OK
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error); bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error);
@ -4085,7 +4085,7 @@ void StubGenerator::generate_all() {
} }
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) { if (bs_nm != nullptr) {
StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier(); StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
} }
#ifdef COMPILER2 #ifdef COMPILER2
@ -4112,13 +4112,13 @@ void StubGenerator::generate_all() {
} }
// Get svml stub routine addresses // Get svml stub routine addresses
void *libjsvml = NULL; void *libjsvml = nullptr;
char ebuf[1024]; char ebuf[1024];
char dll_name[JVM_MAXPATHLEN]; char dll_name[JVM_MAXPATHLEN];
if (os::dll_locate_lib(dll_name, sizeof(dll_name), Arguments::get_dll_dir(), "jsvml")) { if (os::dll_locate_lib(dll_name, sizeof(dll_name), Arguments::get_dll_dir(), "jsvml")) {
libjsvml = os::dll_load(dll_name, ebuf, sizeof ebuf); libjsvml = os::dll_load(dll_name, ebuf, sizeof ebuf);
} }
if (libjsvml != NULL) { if (libjsvml != nullptr) {
// SVML method naming convention // SVML method naming convention
// All the methods are named as __jsvml_op<T><N>_ha_<VV> // All the methods are named as __jsvml_op<T><N>_ha_<VV>
// Where: // Where:
@ -4182,7 +4182,7 @@ void StubGenerator::generate_all() {
} }
void StubGenerator_generate(CodeBuffer* code, int phase) { void StubGenerator_generate(CodeBuffer* code, int phase) {
if (UnsafeCopyMemory::_table == NULL) { if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(16); UnsafeCopyMemory::create_table(16);
} }
StubGenerator g(code, phase); StubGenerator g(code, phase);
@ -122,11 +122,11 @@ class StubGenerator: public StubCodeGenerator {
void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf); void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf);
void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
assert(no_overlap_target != NULL, "must be generated"); assert(no_overlap_target != nullptr, "must be generated");
array_overlap_test(no_overlap_target, NULL, sf); array_overlap_test(no_overlap_target, nullptr, sf);
} }
void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
array_overlap_test(NULL, &L_no_overlap, sf); array_overlap_test(nullptr, &L_no_overlap, sf);
} }
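Aside: a hedged usage sketch of the two forms above, with a stub address to branch out of line or a local Label to branch within the current stub; the particular stub routine named here is illustrative.

// Out-of-line form: the no-overlap case tail-jumps to the disjoint stub.
array_overlap_test(StubRoutines::jbyte_disjoint_arraycopy(), Address::times_1);
// Local form: the no-overlap case continues at a label in this stub.
Label L_copy_forward;
array_overlap_test(L_copy_forward, Address::times_1);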
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -113,7 +113,7 @@ void StubGenerator::generate_arraycopy_stubs() {
"oop_disjoint_arraycopy_uninit", "oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
NULL, "oop_arraycopy_uninit", nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
} else { } else {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
@ -124,12 +124,12 @@ void StubGenerator::generate_arraycopy_stubs() {
"oop_disjoint_arraycopy_uninit", "oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
NULL, "oop_arraycopy_uninit", nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
} }
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
@ -212,7 +212,7 @@ void StubGenerator::array_overlap_test(address no_overlap_target, Label* NOLp, A
__ cmpptr(to, from); __ cmpptr(to, from);
__ lea(end_from, Address(from, count, sf, 0)); __ lea(end_from, Address(from, count, sf, 0));
if (NOLp == NULL) { if (NOLp == nullptr) {
ExternalAddress no_overlap(no_overlap_target); ExternalAddress no_overlap(no_overlap_target);
__ jump_cc(Assembler::belowEqual, no_overlap); __ jump_cc(Assembler::belowEqual, no_overlap);
__ cmpptr(to, end_from); __ cmpptr(to, end_from);
@ -530,7 +530,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(address* entry, const
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -752,7 +752,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(address* entry, const
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1152,7 +1152,7 @@ address StubGenerator::generate_disjoint_byte_copy(bool aligned, address* entry,
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1265,7 +1265,7 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1383,7 +1383,7 @@ address StubGenerator::generate_disjoint_short_copy(bool aligned, address *entry
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1514,7 +1514,7 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1625,7 +1625,7 @@ address StubGenerator::generate_disjoint_int_oop_copy(bool aligned, bool is_oop,
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1732,7 +1732,7 @@ address StubGenerator::generate_conjoint_int_oop_copy(bool aligned, bool is_oop,
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1850,7 +1850,7 @@ address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop
// Save no-overlap entry point for generate_conjoint_long_oop_copy() // Save no-overlap entry point for generate_conjoint_long_oop_copy()
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -1958,7 +1958,7 @@ address StubGenerator::generate_conjoint_long_oop_copy(bool aligned, bool is_oop
__ enter(); // required for proper stackwalking of RuntimeStub frame __ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
@ -2040,9 +2040,9 @@ void StubGenerator::generate_type_check(Register sub_klass,
Label L_miss; Label L_miss;
__ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr,
super_check_offset); super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr);
// Fall through on failure! // Fall through on failure!
__ BIND(L_miss); __ BIND(L_miss);
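Both helpers treat a nullptr Label* as "fall through on that outcome" rather than as a branch target, which is why the slow-path and failure labels can be dropped above. A standalone restatement of that convention (names hypothetical):

  #include <cstdio>

  struct Label { const char* name; };

  // 'on_success' / 'on_failure' may be nullptr: the caller wants control
  // to fall through at that outcome instead of jumping to a label.
  void emit_subtype_check(bool is_subtype, Label* on_success, Label* on_failure) {
    if (is_subtype) {
      if (on_success != nullptr) std::printf("  jmp %s\n", on_success->name);
      // else: fall through on success
    } else {
      if (on_failure != nullptr) std::printf("  jmp %s\n", on_failure->name);
    }
  }

  int main() {
    Label L_miss = { "L_miss" };
    emit_subtype_check(true, nullptr, &L_miss);   // success falls through
    emit_subtype_check(false, nullptr, &L_miss);  // failure jumps to L_miss
    return 0;
  }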
@ -2119,7 +2119,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry,
#endif #endif
// Caller of this entry point must set up the argument registers. // Caller of this entry point must set up the argument registers.
if (entry != NULL) { if (entry != nullptr) {
*entry = __ pc(); *entry = __ pc();
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
@ -2426,13 +2426,13 @@ address StubGenerator::generate_generic_copy(const char *name,
// (2) src_pos must not be negative. // (2) src_pos must not be negative.
// (3) dst_pos must not be negative. // (3) dst_pos must not be negative.
// (4) length must not be negative. // (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL. // (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays. // (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src. // (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst. // (8) dst_pos + length must not exceed length of dst.
// //
// if (src == NULL) return -1; // if (src == nullptr) return -1;
__ testptr(src, src); // src oop __ testptr(src, src); // src oop
size_t j1off = __ offset(); size_t j1off = __ offset();
__ jccb(Assembler::zero, L_failed_0); __ jccb(Assembler::zero, L_failed_0);
@ -2441,7 +2441,7 @@ address StubGenerator::generate_generic_copy(const char *name,
__ testl(src_pos, src_pos); // src_pos (32-bits) __ testl(src_pos, src_pos); // src_pos (32-bits)
__ jccb(Assembler::negative, L_failed_0); __ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1; // if (dst == nullptr) return -1;
__ testptr(dst, dst); // dst oop __ testptr(dst, dst); // dst oop
__ jccb(Assembler::zero, L_failed_0); __ jccb(Assembler::zero, L_failed_0);
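For readability, the same preconditions restated as plain C++ (Arr is a hypothetical stand-in for an array oop; the null tests mirror the pseudo-checks above):

  struct Arr {            // hypothetical stand-in for an array oop
    const void* klass;
    int length;
  };

  int generic_copy_ok(const Arr* src, int src_pos,
                      const Arr* dst, int dst_pos, int length) {
    if (src == nullptr || dst == nullptr) return -1;           // null oops
    if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;   // (2)(3)(4)
    if (src->klass == nullptr || src->klass != dst->klass) return -1; // (5)
    // (6), "must be arrays", is implicit in the Arr type here
    if ((long long)src_pos + length > src->length) return -1;  // (7)
    if ((long long)dst_pos + length > dst->length) return -1;  // (8)
    return 0;             // all checks passed: the stub may run
  }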
@ -2469,12 +2469,12 @@ address StubGenerator::generate_generic_copy(const char *name,
__ load_klass(r10_src_klass, src, rklass_tmp); __ load_klass(r10_src_klass, src, rklass_tmp);
#ifdef ASSERT #ifdef ASSERT
// assert(src->klass() != NULL); // assert(src->klass() != nullptr);
{ {
BLOCK_COMMENT("assert klasses not null {"); BLOCK_COMMENT("assert klasses not null {");
Label L1, L2; Label L1, L2;
__ testptr(r10_src_klass, r10_src_klass); __ testptr(r10_src_klass, r10_src_klass);
__ jcc(Assembler::notZero, L2); // it is broken if klass is NULL __ jcc(Assembler::notZero, L2); // it is broken if klass is null
__ bind(L1); __ bind(L1);
__ stop("broken null klass"); __ stop("broken null klass");
__ bind(L2); __ bind(L2);
@ -33,57 +33,57 @@
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_mxcsr_entry = NULL; address StubRoutines::x86::_verify_mxcsr_entry = nullptr;
address StubRoutines::x86::_upper_word_mask_addr = NULL; address StubRoutines::x86::_upper_word_mask_addr = nullptr;
address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL; address StubRoutines::x86::_shuffle_byte_flip_mask_addr = nullptr;
address StubRoutines::x86::_k256_adr = NULL; address StubRoutines::x86::_k256_adr = nullptr;
address StubRoutines::x86::_vector_short_to_byte_mask = NULL; address StubRoutines::x86::_vector_short_to_byte_mask = nullptr;
address StubRoutines::x86::_vector_int_to_byte_mask = NULL; address StubRoutines::x86::_vector_int_to_byte_mask = nullptr;
address StubRoutines::x86::_vector_int_to_short_mask = NULL; address StubRoutines::x86::_vector_int_to_short_mask = nullptr;
address StubRoutines::x86::_vector_all_bits_set = NULL; address StubRoutines::x86::_vector_all_bits_set = nullptr;
address StubRoutines::x86::_vector_byte_shuffle_mask = NULL; address StubRoutines::x86::_vector_byte_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_int_mask_cmp_bits = NULL; address StubRoutines::x86::_vector_int_mask_cmp_bits = nullptr;
address StubRoutines::x86::_vector_short_shuffle_mask = NULL; address StubRoutines::x86::_vector_short_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_int_shuffle_mask = NULL; address StubRoutines::x86::_vector_int_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_long_shuffle_mask = NULL; address StubRoutines::x86::_vector_long_shuffle_mask = nullptr;
address StubRoutines::x86::_vector_float_sign_mask = NULL; address StubRoutines::x86::_vector_float_sign_mask = nullptr;
address StubRoutines::x86::_vector_float_sign_flip = NULL; address StubRoutines::x86::_vector_float_sign_flip = nullptr;
address StubRoutines::x86::_vector_double_sign_mask = NULL; address StubRoutines::x86::_vector_double_sign_mask = nullptr;
address StubRoutines::x86::_vector_double_sign_flip = NULL; address StubRoutines::x86::_vector_double_sign_flip = nullptr;
address StubRoutines::x86::_vector_byte_perm_mask = NULL; address StubRoutines::x86::_vector_byte_perm_mask = nullptr;
address StubRoutines::x86::_vector_long_sign_mask = NULL; address StubRoutines::x86::_vector_long_sign_mask = nullptr;
address StubRoutines::x86::_vector_iota_indices = NULL; address StubRoutines::x86::_vector_iota_indices = nullptr;
address StubRoutines::x86::_vector_reverse_bit_lut = NULL; address StubRoutines::x86::_vector_reverse_bit_lut = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_long = NULL; address StubRoutines::x86::_vector_reverse_byte_perm_mask_long = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_int = NULL; address StubRoutines::x86::_vector_reverse_byte_perm_mask_int = nullptr;
address StubRoutines::x86::_vector_reverse_byte_perm_mask_short = NULL; address StubRoutines::x86::_vector_reverse_byte_perm_mask_short = nullptr;
address StubRoutines::x86::_vector_popcount_lut = NULL; address StubRoutines::x86::_vector_popcount_lut = nullptr;
address StubRoutines::x86::_vector_count_leading_zeros_lut = NULL; address StubRoutines::x86::_vector_count_leading_zeros_lut = nullptr;
address StubRoutines::x86::_vector_32_bit_mask = NULL; address StubRoutines::x86::_vector_32_bit_mask = nullptr;
address StubRoutines::x86::_vector_64_bit_mask = NULL; address StubRoutines::x86::_vector_64_bit_mask = nullptr;
#ifdef _LP64 #ifdef _LP64
address StubRoutines::x86::_k256_W_adr = NULL; address StubRoutines::x86::_k256_W_adr = nullptr;
address StubRoutines::x86::_k512_W_addr = NULL; address StubRoutines::x86::_k512_W_addr = nullptr;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = NULL; address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = nullptr;
// Base64 masks // Base64 masks
address StubRoutines::x86::_encoding_table_base64 = NULL; address StubRoutines::x86::_encoding_table_base64 = nullptr;
address StubRoutines::x86::_shuffle_base64 = NULL; address StubRoutines::x86::_shuffle_base64 = nullptr;
address StubRoutines::x86::_avx2_shuffle_base64 = NULL; address StubRoutines::x86::_avx2_shuffle_base64 = nullptr;
address StubRoutines::x86::_avx2_input_mask_base64 = NULL; address StubRoutines::x86::_avx2_input_mask_base64 = nullptr;
address StubRoutines::x86::_avx2_lut_base64 = NULL; address StubRoutines::x86::_avx2_lut_base64 = nullptr;
address StubRoutines::x86::_avx2_decode_tables_base64 = NULL; address StubRoutines::x86::_avx2_decode_tables_base64 = nullptr;
address StubRoutines::x86::_avx2_decode_lut_tables_base64 = NULL; address StubRoutines::x86::_avx2_decode_lut_tables_base64 = nullptr;
address StubRoutines::x86::_lookup_lo_base64 = NULL; address StubRoutines::x86::_lookup_lo_base64 = nullptr;
address StubRoutines::x86::_lookup_hi_base64 = NULL; address StubRoutines::x86::_lookup_hi_base64 = nullptr;
address StubRoutines::x86::_lookup_lo_base64url = NULL; address StubRoutines::x86::_lookup_lo_base64url = nullptr;
address StubRoutines::x86::_lookup_hi_base64url = NULL; address StubRoutines::x86::_lookup_hi_base64url = nullptr;
address StubRoutines::x86::_pack_vec_base64 = NULL; address StubRoutines::x86::_pack_vec_base64 = nullptr;
address StubRoutines::x86::_join_0_1_base64 = NULL; address StubRoutines::x86::_join_0_1_base64 = nullptr;
address StubRoutines::x86::_join_1_2_base64 = NULL; address StubRoutines::x86::_join_1_2_base64 = nullptr;
address StubRoutines::x86::_join_2_3_base64 = NULL; address StubRoutines::x86::_join_2_3_base64 = nullptr;
address StubRoutines::x86::_decoding_table_base64 = NULL; address StubRoutines::x86::_decoding_table_base64 = nullptr;
#endif #endif
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL; address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = nullptr;
uint64_t StubRoutines::x86::_crc_by128_masks[] = uint64_t StubRoutines::x86::_crc_by128_masks[] =
{ {
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL; address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = nullptr;
address StubRoutines::x86::_method_entry_barrier = NULL; address StubRoutines::x86::_method_entry_barrier = nullptr;
address StubRoutines::x86::_d2i_wrapper = NULL; address StubRoutines::x86::_d2i_wrapper = nullptr;
address StubRoutines::x86::_d2l_wrapper = NULL; address StubRoutines::x86::_d2l_wrapper = nullptr;
jint StubRoutines::x86::_fpu_cntrl_wrd_std = 0; jint StubRoutines::x86::_fpu_cntrl_wrd_std = 0;
jint StubRoutines::x86::_fpu_cntrl_wrd_24 = 0; jint StubRoutines::x86::_fpu_cntrl_wrd_24 = 0;
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -33,15 +33,15 @@
jint StubRoutines::x86::_mxcsr_std = 0; jint StubRoutines::x86::_mxcsr_std = 0;
address StubRoutines::x86::_get_previous_sp_entry = NULL; address StubRoutines::x86::_get_previous_sp_entry = nullptr;
address StubRoutines::x86::_f2i_fixup = NULL; address StubRoutines::x86::_f2i_fixup = nullptr;
address StubRoutines::x86::_f2l_fixup = NULL; address StubRoutines::x86::_f2l_fixup = nullptr;
address StubRoutines::x86::_d2i_fixup = NULL; address StubRoutines::x86::_d2i_fixup = nullptr;
address StubRoutines::x86::_d2l_fixup = NULL; address StubRoutines::x86::_d2l_fixup = nullptr;
address StubRoutines::x86::_float_sign_mask = NULL; address StubRoutines::x86::_float_sign_mask = nullptr;
address StubRoutines::x86::_float_sign_flip = NULL; address StubRoutines::x86::_float_sign_flip = nullptr;
address StubRoutines::x86::_double_sign_mask = NULL; address StubRoutines::x86::_double_sign_mask = nullptr;
address StubRoutines::x86::_double_sign_flip = NULL; address StubRoutines::x86::_double_sign_flip = nullptr;
address StubRoutines::x86::_method_entry_barrier = NULL; address StubRoutines::x86::_method_entry_barrier = nullptr;
@ -146,7 +146,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address TemplateInterpreterGenerator::generate_exception_handler_common( address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) { const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both"); assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc(); address entry = __ pc();
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
@ -206,7 +206,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Restore stack bottom in case i2c adjusted stack // Restore stack bottom in case i2c adjusted stack
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call // and null it as marker that esp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp(); __ restore_bcp();
@ -254,7 +254,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
} }
#endif // _LP64 #endif // _LP64
// NULL last_sp until next java call // null last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
@ -297,7 +297,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ should_not_reach_here(); __ should_not_reach_here();
__ bind(L); __ bind(L);
} }
if (continuation == NULL) { if (continuation == nullptr) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else { } else {
__ jump_to_entry(continuation); __ jump_to_entry(continuation);
@ -434,8 +434,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
// InterpreterRuntime::frequency_counter_overflow takes two // InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second // arguments, the first (thread) is passed by call_VM, the second
// indicates if the counter overflow occurs at a backwards branch // indicates if the counter overflow occurs at a backwards branch
// (NULL bcp). We pass zero for it. The call returns the address // (null bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or NULL if the // of the verified entry point for the method or null if the
// compilation did not complete (either went background or bailed // compilation did not complete (either went background or bailed
// out). // out).
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
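A standalone sketch of the contract described above; the runtime call is declared here as a hypothetical stand-in, since the generated code only ever sees the returned address in rax:

  using address = unsigned char*;

  // Hypothetical stand-in for the runtime call; branch_bcp == nullptr means
  // the overflow happened at method entry rather than at a backedge.
  address frequency_counter_overflow(address branch_bcp) {
    (void)branch_bcp;
    return nullptr;          // no compiled entry ready in this sketch
  }

  address on_counter_overflow() {
    address verified_entry = frequency_counter_overflow(/*branch_bcp=*/nullptr);
    if (verified_entry == nullptr) {
      // compilation went background or bailed out: keep interpreting
    }
    return verified_entry;
  }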
@ -535,7 +535,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted. // Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError. // Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
// all done with frame size check // all done with frame size check
__ bind(after_frame_check_pop); __ bind(after_frame_check_pop);
@ -589,7 +589,7 @@ void TemplateInterpreterGenerator::lock_method() {
Label L; Label L;
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("synchronization object is NULL"); __ stop("synchronization object is null");
__ bind(L); __ bind(L);
} }
#endif // ASSERT #endif // ASSERT
@ -687,7 +687,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
Label slow_path; Label slow_path;
// rbx: method // rbx: method
// Check if local 0 != NULL // Check if local 0 != null
// If the receiver is null then it is OK to jump to the slow path. // If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize)); __ movptr(rax, Address(rsp, wordSize));
@ -1302,7 +1302,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry // abstract method entry
// pop return address, reset last_sp to NULL // pop return address, reset last_sp to null
__ empty_expression_stack(); __ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed) __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed) __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
@ -1650,7 +1650,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ jcc(Assembler::notEqual, L_done); __ jcc(Assembler::notEqual, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
__ get_method(rdx); __ get_method(rdx);
__ movptr(rax, Address(local0, 0)); __ movptr(rax, Address(local0, 0));
@ -1839,7 +1839,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The run-time stub saves the right registers, depending on // The run-time stub saves the right registers, depending on
// the tosca in-state for the given template. // the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL, assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated"); "entry must have been generated");
#ifndef _LP64 #ifndef _LP64
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
@ -225,7 +225,7 @@ address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() {
return entry; return entry;
} }
return NULL; return nullptr;
} }
/** /**
@ -251,7 +251,7 @@ address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
return entry; return entry;
} }
return NULL; return nullptr;
} }
@ -278,7 +278,7 @@ address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() {
return entry; return entry;
} }
return NULL; return nullptr;
} }
/** /**
@ -305,7 +305,7 @@ address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry(
return entry; return entry;
} }
return NULL; return nullptr;
} }
/** /**
@ -375,7 +375,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// //
if (kind == Interpreter::java_lang_math_fmaD) { if (kind == Interpreter::java_lang_math_fmaD) {
if (!UseFMA) { if (!UseFMA) {
return NULL; // Generate a vanilla entry return nullptr; // Generate a vanilla entry
} }
__ movdbl(xmm2, Address(rsp, 5 * wordSize)); __ movdbl(xmm2, Address(rsp, 5 * wordSize));
__ movdbl(xmm1, Address(rsp, 3 * wordSize)); __ movdbl(xmm1, Address(rsp, 3 * wordSize));
@ -388,7 +388,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
return entry_point; return entry_point;
} else if (kind == Interpreter::java_lang_math_fmaF) { } else if (kind == Interpreter::java_lang_math_fmaF) {
if (!UseFMA) { if (!UseFMA) {
return NULL; // Generate a vanilla entry return nullptr; // Generate a vanilla entry
} }
__ movflt(xmm2, Address(rsp, 3 * wordSize)); __ movflt(xmm2, Address(rsp, 3 * wordSize));
__ movflt(xmm1, Address(rsp, 2 * wordSize)); __ movflt(xmm1, Address(rsp, 2 * wordSize));
@ -406,7 +406,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_sin : case Interpreter::java_lang_math_sin :
__ subptr(rsp, 2 * wordSize); __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) { if (VM_Version::supports_sse2() && StubRoutines::dsin() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin));
@ -416,7 +416,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_cos : case Interpreter::java_lang_math_cos :
__ subptr(rsp, 2 * wordSize); __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) { if (VM_Version::supports_sse2() && StubRoutines::dcos() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos));
@ -426,7 +426,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_tan : case Interpreter::java_lang_math_tan :
__ subptr(rsp, 2 * wordSize); __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (StubRoutines::dtan() != NULL) { if (StubRoutines::dtan() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
@ -442,7 +442,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_log: case Interpreter::java_lang_math_log:
__ subptr(rsp, 2 * wordSize); __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (StubRoutines::dlog() != NULL) { if (StubRoutines::dlog() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog));
@ -452,7 +452,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_log10: case Interpreter::java_lang_math_log10:
__ subptr(rsp, 2 * wordSize); __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (StubRoutines::dlog10() != NULL) { if (StubRoutines::dlog10() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10));
@ -464,7 +464,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ subptr(rsp, 4 * wordSize); __ subptr(rsp, 4 * wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
__ fstp_d(Address(rsp, 2 * wordSize)); __ fstp_d(Address(rsp, 2 * wordSize));
if (StubRoutines::dpow() != NULL) { if (StubRoutines::dpow() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow));
@ -474,7 +474,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_exp: case Interpreter::java_lang_math_exp:
__ subptr(rsp, 2*wordSize); __ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
if (StubRoutines::dexp() != NULL) { if (StubRoutines::dexp() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp));
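Every case above follows the same shape: call the hand-written stub when one was generated, otherwise fall back to the shared C++ runtime. A standalone restatement (function names hypothetical):

  #include <cmath>
  #include <cstdio>

  typedef double (*unary_math_fn)(double);

  double shared_dexp(double x) { return std::exp(x); }  // portable fallback

  double dispatch(unary_math_fn stub, double x) {
    unary_math_fn f = (stub != nullptr) ? stub : shared_dexp;
    return f(x);             // stub stays nullptr unless one was generated
  }

  int main() {
    std::printf("%f\n", dispatch(nullptr, 1.0));  // falls back to shared code
    return 0;
  }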
@ -58,7 +58,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args // stack args
// garbage // garbage
// expression stack bottom // expression stack bottom
// bcp (NULL) // bcp (null)
// ... // ...
// Do FP first so we can use c_rarg3 as temp // Do FP first so we can use c_rarg3 as temp
@ -138,7 +138,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// stack args // stack args
// garbage // garbage
// expression stack bottom // expression stack bottom
// bcp (NULL) // bcp (null)
// ... // ...
// Do FP first so we can use c_rarg3 as temp // Do FP first so we can use c_rarg3 as temp
@ -399,7 +399,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
if (kind == Interpreter::java_lang_math_fmaD) { if (kind == Interpreter::java_lang_math_fmaD) {
if (!UseFMA) { if (!UseFMA) {
return NULL; // Generate a vanilla entry return nullptr; // Generate a vanilla entry
} }
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
__ movdbl(xmm1, Address(rsp, 3 * wordSize)); __ movdbl(xmm1, Address(rsp, 3 * wordSize));
@ -407,7 +407,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ fmad(xmm0, xmm1, xmm2, xmm0); __ fmad(xmm0, xmm1, xmm2, xmm0);
} else if (kind == Interpreter::java_lang_math_fmaF) { } else if (kind == Interpreter::java_lang_math_fmaF) {
if (!UseFMA) { if (!UseFMA) {
return NULL; // Generate a vanilla entry return nullptr; // Generate a vanilla entry
} }
__ movflt(xmm0, Address(rsp, wordSize)); __ movflt(xmm0, Address(rsp, wordSize));
__ movflt(xmm1, Address(rsp, 2 * wordSize)); __ movflt(xmm1, Address(rsp, 2 * wordSize));
@ -417,35 +417,35 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ sqrtsd(xmm0, Address(rsp, wordSize)); __ sqrtsd(xmm0, Address(rsp, wordSize));
} else if (kind == Interpreter::java_lang_math_exp) { } else if (kind == Interpreter::java_lang_math_exp) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dexp() != NULL) { if (StubRoutines::dexp() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp));
} }
} else if (kind == Interpreter::java_lang_math_log) { } else if (kind == Interpreter::java_lang_math_log) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dlog() != NULL) { if (StubRoutines::dlog() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog));
} }
} else if (kind == Interpreter::java_lang_math_log10) { } else if (kind == Interpreter::java_lang_math_log10) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dlog10() != NULL) { if (StubRoutines::dlog10() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10));
} }
} else if (kind == Interpreter::java_lang_math_sin) { } else if (kind == Interpreter::java_lang_math_sin) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dsin() != NULL) { if (StubRoutines::dsin() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin));
} }
} else if (kind == Interpreter::java_lang_math_cos) { } else if (kind == Interpreter::java_lang_math_cos) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dcos() != NULL) { if (StubRoutines::dcos() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos));
@ -453,20 +453,20 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else if (kind == Interpreter::java_lang_math_pow) { } else if (kind == Interpreter::java_lang_math_pow) {
__ movdbl(xmm1, Address(rsp, wordSize)); __ movdbl(xmm1, Address(rsp, wordSize));
__ movdbl(xmm0, Address(rsp, 3 * wordSize)); __ movdbl(xmm0, Address(rsp, 3 * wordSize));
if (StubRoutines::dpow() != NULL) { if (StubRoutines::dpow() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow));
} }
} else if (kind == Interpreter::java_lang_math_tan) { } else if (kind == Interpreter::java_lang_math_tan) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
if (StubRoutines::dtan() != NULL) { if (StubRoutines::dtan() != nullptr) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
} else { } else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
} }
} else if (kind == Interpreter::java_lang_math_abs) { } else if (kind == Interpreter::java_lang_math_abs) {
assert(StubRoutines::x86::double_sign_mask() != NULL, "not initialized"); assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
__ andpd(xmm0, ExternalAddress(StubRoutines::x86::double_sign_mask())); __ andpd(xmm0, ExternalAddress(StubRoutines::x86::double_sign_mask()));
} else { } else {
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -143,8 +143,8 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) {
// Miscellaneous helper routines // Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj. // Store an oop (or null) at the address described by obj.
// If val == noreg this means store a NULL // If val == noreg this means store a null
static void do_oop_store(InterpreterMacroAssembler* _masm, static void do_oop_store(InterpreterMacroAssembler* _masm,
@ -452,7 +452,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ resolve_oop_handle(tmp, rscratch2); __ resolve_oop_handle(tmp, rscratch2);
__ cmpoop(tmp, result); __ cmpoop(tmp, result);
__ jccb(Assembler::notEqual, notNull); __ jccb(Assembler::notEqual, notNull);
__ xorptr(result, result); // NULL object reference __ xorptr(result, result); // null object reference
__ bind(notNull); __ bind(notNull);
} }
@ -1155,11 +1155,11 @@ void TemplateTable::aastore() {
do_oop_store(_masm, element_address, rax, IS_ARRAY); do_oop_store(_masm, element_address, rax, IS_ARRAY);
__ jmp(done); __ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx] // Have a null in rax, rdx=array, ecx=index. Store null at ary[idx]
__ bind(is_null); __ bind(is_null);
__ profile_null_seen(rbx); __ profile_null_seen(rbx);
// Store a NULL // Store a null
do_oop_store(_masm, element_address, noreg, IS_ARRAY); do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments // Pop stack arguments
@ -2208,7 +2208,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
in_bytes(InvocationCounter::counter_offset())); in_bytes(InvocationCounter::counter_offset()));
const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset())); const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, mask, rax, __ increment_mask_and_jump(mdo_backedge_counter, mask, rax,
UseOnStackReplacement ? &backedge_counter_overflow : NULL); UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ jmp(dispatch); __ jmp(dispatch);
} }
__ bind(no_mdo); __ bind(no_mdo);
@ -2216,7 +2216,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset())); const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(rcx, be_offset), mask, rax, __ increment_mask_and_jump(Address(rcx, be_offset), mask, rax,
UseOnStackReplacement ? &backedge_counter_overflow : NULL); UseOnStackReplacement ? &backedge_counter_overflow : nullptr);
__ bind(dispatch); __ bind(dispatch);
} }
@ -2242,7 +2242,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
InterpreterRuntime::frequency_counter_overflow), InterpreterRuntime::frequency_counter_overflow),
rdx); rdx);
// rax: osr nmethod (osr ok) or NULL (osr not possible) // rax: osr nmethod (osr ok) or null (osr not possible)
// rdx: scratch // rdx: scratch
// r14: locals pointer // r14: locals pointer
// r13: bcp // r13: bcp
@ -2687,7 +2687,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ load_resolved_method_at_index(byte_no, method, cache, index); __ load_resolved_method_at_index(byte_no, method, cache, index);
__ load_method_holder(klass, method); __ load_method_holder(klass, method);
__ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow); __ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
} }
} }
@ -2774,13 +2774,13 @@ void TemplateTable::jvmti_post_field_access(Register cache,
__ shll(index, LogBytesPerWord); __ shll(index, LogBytesPerWord);
__ addptr(cache, index); __ addptr(cache, index);
if (is_static) { if (is_static) {
__ xorptr(rax, rax); // NULL object reference __ xorptr(rax, rax); // null object reference
} else { } else {
__ pop(atos); // Get the object __ pop(atos); // Get the object
__ verify_oop(rax); __ verify_oop(rax);
__ push(atos); // Restore stack state __ push(atos); // Restore stack state
} }
// rax,: object pointer or NULL // rax,: object pointer or null
// cache: cache entry pointer // cache: cache entry pointer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
rax, cache); rax, cache);
@ -3031,7 +3031,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ addptr(robj, RDX); __ addptr(robj, RDX);
// object (tos) // object (tos)
__ mov(RCX, rsp); __ mov(RCX, rsp);
// c_rarg1: object pointer set up above (NULL if static) // c_rarg1: object pointer set up above (null if static)
// c_rarg2: cache entry pointer // c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack // c_rarg3: jvalue object on the stack
__ call_VM(noreg, __ call_VM(noreg,
@ -4112,7 +4112,7 @@ void TemplateTable::checkcast() {
__ bind(ok_is_subtype); __ bind(ok_is_subtype);
__ mov(rax, rdx); // Restore object in rdx __ mov(rax, rdx); // Restore object in rdx
// Collect counts on whether this check-cast sees NULLs a lot or not. // Collect counts on whether this check-cast sees nulls a lot or not.
if (ProfileInterpreter) { if (ProfileInterpreter) {
__ jmp(done); __ jmp(done);
__ bind(is_null); __ bind(is_null);
@ -4175,7 +4175,7 @@ void TemplateTable::instanceof() {
__ bind(ok_is_subtype); __ bind(ok_is_subtype);
__ movl(rax, 1); __ movl(rax, 1);
// Collect counts on whether this test sees NULLs a lot or not. // Collect counts on whether this test sees nulls a lot or not.
if (ProfileInterpreter) { if (ProfileInterpreter) {
__ jmp(done); __ jmp(done);
__ bind(is_null); __ bind(is_null);
@ -4184,8 +4184,8 @@ void TemplateTable::instanceof() {
__ bind(is_null); // same as 'done' __ bind(is_null); // same as 'done'
} }
__ bind(done); __ bind(done);
// rax = 0: obj == NULL or obj is not an instanceof the specified klass // rax = 0: obj == nullptr or obj is not an instanceof the specified klass
// rax = 1: obj != NULL and obj is an instanceof the specified klass // rax = 1: obj != nullptr and obj is an instanceof the specified klass
} }
@ -4247,7 +4247,7 @@ void TemplateTable::athrow() {
void TemplateTable::monitorenter() { void TemplateTable::monitorenter() {
transition(atos, vtos); transition(atos, vtos);
// check for NULL object // check for null object
__ null_check(rax); __ null_check(rax);
const Address monitor_block_top( const Address monitor_block_top(
@ -4263,7 +4263,7 @@ void TemplateTable::monitorenter() {
Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx); Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
// initialize entry pointer // initialize entry pointer
__ xorl(rmon, rmon); // points to free slot or NULL __ xorl(rmon, rmon); // points to free slot or null
// find a free slot in the monitor block (result in rmon) // find a free slot in the monitor block (result in rmon)
{ {
@ -4344,7 +4344,7 @@ void TemplateTable::monitorenter() {
void TemplateTable::monitorexit() { void TemplateTable::monitorexit() {
transition(atos, vtos); transition(atos, vtos);
// check for NULL object // check for null object
__ null_check(rax); __ null_check(rax);
const Address monitor_block_top( const Address monitor_block_top(
@ -63,8 +63,8 @@ extern "C" {
typedef void (*get_cpu_info_stub_t)(void*); typedef void (*get_cpu_info_stub_t)(void*);
typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*); typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
} }
static get_cpu_info_stub_t get_cpu_info_stub = NULL; static get_cpu_info_stub_t get_cpu_info_stub = nullptr;
static detect_virt_stub_t detect_virt_stub = NULL; static detect_virt_stub_t detect_virt_stub = nullptr;
#ifdef _LP64 #ifdef _LP64
@ -402,7 +402,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// //
// Some OSs have a bug when upper 128/256bits of YMM/ZMM // Some OSs have a bug when upper 128/256bits of YMM/ZMM
// registers are not restored after signal processing. // registers are not restored after signal processing.
// Generate SEGV here (reference through NULL) // Generate SEGV here (reference through null)
// and check upper YMM/ZMM bits after it. // and check upper YMM/ZMM bits after it.
// //
int saved_useavx = UseAVX; int saved_useavx = UseAVX;
@ -2111,7 +2111,7 @@ void VM_Version::initialize() {
ResourceMark rm; ResourceMark rm;
// Making this stub must be FIRST use of assembler // Making this stub must be FIRST use of assembler
stub_blob = BufferBlob::create("VM_Version stub", stub_size); stub_blob = BufferBlob::create("VM_Version stub", stub_size);
if (stub_blob == NULL) { if (stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate stub for VM_Version"); vm_exit_during_initialization("Unable to allocate stub for VM_Version");
} }
CodeBuffer c(stub_blob); CodeBuffer c(stub_blob);
@ -2185,7 +2185,7 @@ extern "C" {
typedef void (*getCPUIDBrandString_stub_t)(void*); typedef void (*getCPUIDBrandString_stub_t)(void*);
} }
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = NULL; static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;
// VM_Version statics // VM_Version statics
enum { enum {
@ -2195,7 +2195,7 @@ enum {
const size_t VENDOR_LENGTH = 13; const size_t VENDOR_LENGTH = 13;
const size_t CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1); const size_t CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1);
static char* _cpu_brand_string = NULL; static char* _cpu_brand_string = nullptr;
static int64_t _max_qualified_cpu_frequency = 0; static int64_t _max_qualified_cpu_frequency = 0;
static int _no_of_threads = 0; static int _no_of_threads = 0;
@ -2320,7 +2320,7 @@ const char* const _model_id_pentium_pro[] = {
"", "",
"Haswell", // 0x45 "4th Generation Intel Core Processor" "Haswell", // 0x45 "4th Generation Intel Core Processor"
"Haswell", // 0x46 "4th Generation Intel Core Processor" "Haswell", // 0x46 "4th Generation Intel Core Processor"
NULL nullptr
}; };
/* Brand ID is for back compatibility /* Brand ID is for back compatibility
@ -2335,7 +2335,7 @@ const char* const _brand_id[] = {
"", "",
"", "",
"Intel Pentium 4 processor", "Intel Pentium 4 processor",
NULL nullptr
}; };
@ -2483,7 +2483,7 @@ void VM_Version::initialize_tsc(void) {
ResourceMark rm; ResourceMark rm;
cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size); cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size);
if (cpuid_brand_string_stub_blob == NULL) { if (cpuid_brand_string_stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub"); vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub");
} }
CodeBuffer c(cpuid_brand_string_stub_blob); CodeBuffer c(cpuid_brand_string_stub_blob);
@ -2495,12 +2495,12 @@ void VM_Version::initialize_tsc(void) {
const char* VM_Version::cpu_model_description(void) { const char* VM_Version::cpu_model_description(void) {
uint32_t cpu_family = extended_cpu_family(); uint32_t cpu_family = extended_cpu_family();
uint32_t cpu_model = extended_cpu_model(); uint32_t cpu_model = extended_cpu_model();
const char* model = NULL; const char* model = nullptr;
if (cpu_family == CPU_FAMILY_PENTIUMPRO) { if (cpu_family == CPU_FAMILY_PENTIUMPRO) {
for (uint32_t i = 0; i <= cpu_model; i++) { for (uint32_t i = 0; i <= cpu_model; i++) {
model = _model_id_pentium_pro[i]; model = _model_id_pentium_pro[i];
if (model == NULL) { if (model == nullptr) {
break; break;
} }
} }
@ -2509,27 +2509,27 @@ const char* VM_Version::cpu_model_description(void) {
} }
const char* VM_Version::cpu_brand_string(void) { const char* VM_Version::cpu_brand_string(void) {
if (_cpu_brand_string == NULL) { if (_cpu_brand_string == nullptr) {
_cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal); _cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal);
if (NULL == _cpu_brand_string) { if (nullptr == _cpu_brand_string) {
return NULL; return nullptr;
} }
int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH); int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH);
if (ret_val != OS_OK) { if (ret_val != OS_OK) {
FREE_C_HEAP_ARRAY(char, _cpu_brand_string); FREE_C_HEAP_ARRAY(char, _cpu_brand_string);
_cpu_brand_string = NULL; _cpu_brand_string = nullptr;
} }
} }
return _cpu_brand_string; return _cpu_brand_string;
} }
const char* VM_Version::cpu_brand(void) { const char* VM_Version::cpu_brand(void) {
const char* brand = NULL; const char* brand = nullptr;
if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) { if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) {
int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF; int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF;
brand = _brand_id[0]; brand = _brand_id[0];
for (int i = 0; brand != NULL && i <= brand_num; i += 1) { for (int i = 0; brand != nullptr && i <= brand_num; i += 1) {
brand = _brand_id[i]; brand = _brand_id[i];
} }
} }
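The nullptr entries appended to _model_id_pentium_pro[] and _brand_id[] act as sentinels for walks like the loop above; a standalone restatement (table contents hypothetical):

  static const char* const table[] = { "", "brand A", "brand B", nullptr };

  const char* pick(int idx) {
    const char* val = table[0];
    for (int i = 0; val != nullptr && i <= idx; i++) {
      val = table[i];        // the nullptr sentinel stops an oversized idx
    }
    return val;              // nullptr when idx points past the last entry
  }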
@ -2619,11 +2619,11 @@ const char* VM_Version::cpu_family_description(void) {
} }
int VM_Version::cpu_type_description(char* const buf, size_t buf_len) { int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!"); assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!"); assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!");
const char* cpu_type = NULL; const char* cpu_type = nullptr;
const char* x64 = NULL; const char* x64 = nullptr;
if (is_intel()) { if (is_intel()) {
cpu_type = "Intel"; cpu_type = "Intel";
@ -2656,9 +2656,9 @@ int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
} }
int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) { int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!"); assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!"); assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!");
assert(getCPUIDBrandString_stub != NULL, "not initialized"); assert(getCPUIDBrandString_stub != nullptr, "not initialized");
// invoke newly generated asm code to fetch CPU Brand String // invoke newly generated asm code to fetch CPU Brand String
getCPUIDBrandString_stub(&_cpuid_info); getCPUIDBrandString_stub(&_cpuid_info);
@ -2681,7 +2681,7 @@ int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
} }
size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) { size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
guarantee(buf != NULL, "buffer is NULL!"); guarantee(buf != nullptr, "buffer is null!");
guarantee(buf_len > 0, "buffer len not enough!"); guarantee(buf_len > 0, "buffer len not enough!");
unsigned int flag = 0; unsigned int flag = 0;
@ -2742,31 +2742,31 @@ size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
* feature set. * feature set.
*/ */
int VM_Version::cpu_detailed_description(char* const buf, size_t buf_len) { int VM_Version::cpu_detailed_description(char* const buf, size_t buf_len) {
assert(buf != NULL, "buffer is NULL!"); assert(buf != nullptr, "buffer is null!");
assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!"); assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!");
static const char* unknown = "<unknown>"; static const char* unknown = "<unknown>";
char vendor_id[VENDOR_LENGTH]; char vendor_id[VENDOR_LENGTH];
const char* family = NULL; const char* family = nullptr;
const char* model = NULL; const char* model = nullptr;
const char* brand = NULL; const char* brand = nullptr;
int outputLen = 0; int outputLen = 0;
family = cpu_family_description(); family = cpu_family_description();
if (family == NULL) { if (family == nullptr) {
family = unknown; family = unknown;
} }
model = cpu_model_description(); model = cpu_model_description();
if (model == NULL) { if (model == nullptr) {
model = unknown; model = unknown;
} }
brand = cpu_brand_string(); brand = cpu_brand_string();
if (brand == NULL) { if (brand == nullptr) {
brand = cpu_brand(); brand = cpu_brand();
if (brand == NULL) { if (brand == nullptr) {
brand = unknown; brand = unknown;
} }
} }
@ -2835,7 +2835,7 @@ void VM_Version::initialize_cpu_information() {
*/ */
int64_t VM_Version::max_qualified_cpu_freq_from_brand_string(void) { int64_t VM_Version::max_qualified_cpu_freq_from_brand_string(void) {
const char* const brand_string = cpu_brand_string(); const char* const brand_string = cpu_brand_string();
if (brand_string == NULL) { if (brand_string == nullptr) {
return 0; return 0;
} }
const int64_t MEGA = 1000000; const int64_t MEGA = 1000000;
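This function derives the maximum qualified frequency from the unit suffix at the tail of the CPUID brand string. A minimal standalone sketch of that idea, assuming the string ends in a "<value>MHz"/"<value>GHz" token; the function name and parsing details here are illustrative, not the actual HotSpot implementation:

#include <cstdint>
#include <cstdio>
#include <cstring>

static int64_t max_freq_from_brand(const char* brand) {
  if (brand == nullptr) {
    return 0;  // same early-out as above
  }
  const int64_t MEGA = 1000000;
  int64_t unit = 0;
  // The unit suffix fixes the multiplier: "2.20GHz" means 2.20 * 10^9 Hz.
  if (strstr(brand, "GHz") != nullptr) {
    unit = MEGA * 1000;
  } else if (strstr(brand, "MHz") != nullptr) {
    unit = MEGA;
  }
  if (unit == 0) {
    return 0;  // no qualified frequency present in the brand string
  }
  // Parse the decimal value preceding the suffix, e.g. "... @ 2.20GHz".
  double value = 0.0;
  const char* p = strrchr(brand, ' ');
  if (p == nullptr || sscanf(p, " %lf", &value) != 1) {
    return 0;
  }
  return (int64_t)(value * (double)unit);
}

Under those assumptions, a brand string such as "Intel(R) Xeon(R) CPU @ 2.20GHz" yields 2200000000.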
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -60,9 +60,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true); const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
@ -129,7 +129,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD); __ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL"); __ stop("Vtable entry is null");
__ bind(L); __ bind(L);
} }
#endif // PRODUCT #endif // PRODUCT
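Both stub creators rely on the same convention: the placement operator new carves the stub out of the code cache and, being non-throwing, returns nullptr on exhaustion, so the constructor never runs and the caller must check the result. A compilable sketch of that convention with hypothetical types (HotSpot's real VtableStub allocator is more involved):

#include <cstddef>
#include <cstdlib>

struct Stub {
  // Non-throwing allocation: returning nullptr makes the new-expression
  // itself evaluate to nullptr without ever running the constructor.
  void* operator new(size_t size, int code_length) noexcept {
    return malloc(size + (size_t)code_length);  // stand-in for code-cache space
  }
  Stub(bool is_vtable, int index) : _is_vtable(is_vtable), _index(index) {}
  bool _is_vtable;
  int  _index;
};

Stub* create_stub(int index) {
  Stub* s = new (/*code_length=*/64) Stub(true, index);
  if (s == nullptr) {  // allocation can fail; propagate the null
    return nullptr;
  }
  return s;
}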
@ -152,9 +152,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false); const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid // We add them to the computed buffer size in order to avoid
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -49,9 +49,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true); const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
@ -120,7 +120,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD); __ cmpptr(Address(method, Method::from_compiled_offset()), NULL_WORD);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL"); __ stop("Vtable entry is null");
__ bind(L); __ bind(L);
} }
#endif // PRODUCT #endif // PRODUCT
@ -143,9 +143,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false); const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.