8300081: Replace NULL with nullptr in share/asm/

Reviewed-by: coleenp
commit 9d5bab11f0
parent 41ee125a0f
Author: Johan Sjölen
Date:   2023-05-16 15:40:39 +00:00
5 changed files with 99 additions and 99 deletions
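
Why the change matters: NULL is an integer constant in C++, so it participates in integer overload resolution, while nullptr has its own type (std::nullptr_t) that converts only to pointer types. A minimal standalone sketch (not part of the patch) of the overload hazard the cleanup removes:

    #include <cstddef>

    void f(int)   {}   // f(NULL) can land here, or be ambiguous,
    void f(char*) {}   // depending on how the platform defines NULL

    int main() {
      f(nullptr);      // unambiguous: std::nullptr_t converts only to f(char*)
      return 0;
    }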

src/hotspot/share/asm/assembler.cpp

@@ -42,10 +42,10 @@
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
if (code == NULL) return;
if (code == nullptr) return;
CodeSection* cs = code->insts();
cs->clear_mark(); // new assembler kills old mark
if (cs->start() == NULL) {
if (cs->start() == nullptr) {
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "CodeCache: no room for %s", code->name());
}
_code_section = cs;
@@ -66,15 +66,15 @@ address AbstractAssembler::start_a_stub(int required_space) {
CodeSection* cs = cb->stubs();
assert(_code_section == cb->insts(), "not in insts?");
if (cs->maybe_expand_to_ensure_remaining(required_space)
&& cb->blob() == NULL) {
return NULL;
&& cb->blob() == nullptr) {
return nullptr;
}
set_code_section(cs);
return pc();
}
// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
// Should not be called if start_a_stub() returned null
void AbstractAssembler::end_a_stub() {
assert(_code_section == code()->stubs(), "not in stubs?");
set_code_section(code()->insts());
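
The comment above states the contract; a hedged usage sketch (stub_size and the emitted body are illustrative — compare the emit_shared_stubs_to_interp loop in codeBuffer.inline.hpp below):

    address stub = masm->start_a_stub(stub_size);  // switch emission to the stubs section
    if (stub == nullptr) {
      return false;      // CodeCache is full; end_a_stub() must not be called
    }
    // ... emit the stub's instructions ...
    masm->end_a_stub();  // switch emission back to the insts section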
@@ -88,7 +88,7 @@ address AbstractAssembler::start_a_const(int required_space, int required_align)
address end = cs->end();
int pad = -(intptr_t)end & (required_align-1);
if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
if (cb->blob() == NULL) return NULL;
if (cb->blob() == nullptr) return nullptr;
end = cs->end(); // refresh pointer
}
if (pad > 0) {
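
The pad computation above is the usual power-of-two rounding trick: for an alignment a that is a power of two, (-x) & (a - 1) is the number of bytes needed to round x up to the next multiple of a. A standalone check:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t a = 8;                        // stands in for required_align
      assert((-(intptr_t)0x1004 & (a - 1)) == 4);  // 0x1004 rounds up to 0x1008
      assert((-(intptr_t)0x1008 & (a - 1)) == 0);  // already aligned: no pad
      return 0;
    }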
@@ -162,7 +162,7 @@ void Label::add_patch_at(CodeBuffer* cb, int branch_loc, const char* file, int l
_files[_patch_index] = file;
#endif
} else {
if (_patch_overflow == NULL) {
if (_patch_overflow == nullptr) {
_patch_overflow = cb->create_patch_overflow();
}
_patch_overflow->push(branch_loc);
@@ -179,7 +179,7 @@ void Label::patch_instructions(MacroAssembler* masm) {
--_patch_index;
int branch_loc;
int line = 0;
const char* file = NULL;
const char* file = nullptr;
if (_patch_index >= PatchCacheSize) {
branch_loc = _patch_overflow->pop();
} else {
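
Both Label hunks above share one design: the first PatchCacheSize patch locations live in fixed arrays inside the Label, and only the overflow spills into a lazily created, arena-backed GrowableArray. Condensed shape of the writer side (simplified from add_patch_at above):

    if (_patch_index < PatchCacheSize) {
      _patches[_patch_index] = branch_loc;              // fast path: fixed cache
    } else {
      if (_patch_overflow == nullptr) {
        _patch_overflow = cb->create_patch_overflow();  // lazy allocation
      }
      _patch_overflow->push(branch_loc);                // slow path: growable list
    }
    ++_patch_index;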
@@ -212,7 +212,7 @@ const char* AbstractAssembler::code_string(const char* str) {
if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
return code_section()->outer()->code_string(str);
}
return NULL;
return nullptr;
}
bool MacroAssembler::uses_implicit_null_check(void* address) {
@@ -221,7 +221,7 @@ bool MacroAssembler::uses_implicit_null_check(void* address) {
uintptr_t addr = reinterpret_cast<uintptr_t>(address);
uintptr_t page_size = (uintptr_t)os::vm_page_size();
#ifdef _LP64
if (UseCompressedOops && CompressedOops::base() != NULL) {
if (UseCompressedOops && CompressedOops::base() != nullptr) {
// A SEGV can legitimately happen in C2 code at address
// (heap_base + offset) if Matcher::narrow_oop_use_complex_address
// is configured to allow narrow oops field loads to be implicitly
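
For orientation, a deliberately simplified sketch of the predicate's idea (not HotSpot's exact logic): a faulting access counts as an implicit null check if it hits the first page, or, with compressed oops and a non-zero base, lands within a page of the heap base, since a null narrow oop decodes to heap_base + field_offset:

    // hypothetical simplification, illustrative names only
    bool looks_like_implicit_null_check(uintptr_t addr, uintptr_t heap_base,
                                        uintptr_t page_size) {
      if (addr < page_size) return true;                        // null + small offset
      return heap_base != 0 && (addr - heap_base) < page_size;  // null narrow oop
    }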

src/hotspot/share/asm/assembler.hpp

@@ -145,7 +145,7 @@ class Label {
* @param cb the code buffer being patched
* @param branch_loc the locator of the branch instruction in the code buffer
*/
void add_patch_at(CodeBuffer* cb, int branch_loc, const char* file = NULL, int line = 0);
void add_patch_at(CodeBuffer* cb, int branch_loc, const char* file = nullptr, int line = 0);
/**
* Iterate over the list of patches, resolving the instructions
@@ -156,7 +156,7 @@
void init() {
_loc = -1;
_patch_index = 0;
_patch_overflow = NULL;
_patch_overflow = nullptr;
_is_near = false;
}
@@ -233,7 +233,7 @@ class AbstractAssembler : public ResourceObj {
public:
InstructionMark(AbstractAssembler* assm) : _assm(assm) {
assert(assm->inst_mark() == NULL, "overlapping instructions");
assert(assm->inst_mark() == nullptr, "overlapping instructions");
_assm->set_inst_mark();
}
~InstructionMark() {
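
The assert above is what makes InstructionMark a strict RAII guard: marks may not nest, and the destructor clears what the constructor set. A hedged usage sketch inside an emitter:

    {
      InstructionMark im(this);  // records the start pc of this instruction
      relocate(rspec);           // relocation anchors at the marked pc
      emit_int32(insn);          // illustrative instruction bytes
    }                            // ~InstructionMark clears the mark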
@@ -359,7 +359,7 @@ class AbstractAssembler : public ResourceObj {
// Constants in code
void relocate(RelocationHolder const& rspec, int format = 0) {
assert(!pd_check_instruction_mark()
|| inst_mark() == NULL || inst_mark() == code_section()->end(),
|| inst_mark() == nullptr || inst_mark() == code_section()->end(),
"call relocate() between instructions");
code_section()->relocate(code_section()->end(), rspec, format);
}
@@ -396,7 +396,7 @@
address int_constant(jint c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
emit_int32(c);
end_a_const(c1);
}
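
int_constant and the long/double/float/address variants that follow all repeat this start/emit/end shape. A hypothetical factoring (not in the patch) that makes the shared contract explicit:

    // sketch only: EmitFn performs the raw emit for the constant's type T
    template <typename T, typename EmitFn>
    address emit_typed_constant(T c, EmitFn emit) {
      CodeSection* c1 = _code_section;                   // remember current section
      address ptr = start_a_const(sizeof(c), sizeof(c));
      if (ptr != nullptr) {                              // nullptr: no room in CodeCache
        emit(c);
        end_a_const(c1);                                 // switch back afterwards
      }
      return ptr;                                        // callers must null-check
    }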
@@ -405,7 +405,7 @@
address long_constant(jlong c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
emit_int64(c);
end_a_const(c1);
}
@@ -414,7 +414,7 @@
address double_constant(jdouble c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
emit_double(c);
end_a_const(c1);
}
@@ -423,7 +423,7 @@
address float_constant(jfloat c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
emit_float(c);
end_a_const(c1);
}
@@ -432,7 +432,7 @@
address address_constant(address c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
emit_address(c);
end_a_const(c1);
}
@@ -441,7 +441,7 @@
address address_constant(address c, RelocationHolder const& rspec) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
if (ptr != nullptr) {
relocate(rspec);
emit_address(c);
end_a_const(c1);
@@ -453,7 +453,7 @@
int len = c->length();
int size = type2aelembytes(bt) * len;
address ptr = start_a_const(size, alignment);
if (ptr != NULL) {
if (ptr != nullptr) {
for (int i = 0; i < len; i++) {
jvalue e = c->at(i);
switch(bt) {

src/hotspot/share/asm/codeBuffer.cpp

@@ -102,9 +102,9 @@ void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;
assert(blob() == NULL, "only once");
assert(blob() == nullptr, "only once");
set_blob(BufferBlob::create(_name, total_size));
if (blob() == NULL) {
if (blob() == nullptr) {
// The assembler constructor will throw a fatal on an empty CodeBuffer.
return; // caller must test this
}
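
Note the error convention this hunk makes explicit: allocation failure leaves blob() null instead of throwing, so every construction site must test. A hedged usage sketch:

    CodeBuffer cb("illustrative_blob", total_size, /*locs_size=*/ 0);
    if (cb.blob() == nullptr) {
      return nullptr;  // CodeCache exhausted; propagate the failure
    }
    // safe to attach an assembler to cb and emit code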
@@ -130,7 +130,7 @@ CodeBuffer::~CodeBuffer() {
// If we allocated our code buffer from the CodeCache via a BufferBlob, and
// it's not permanent, then free the BufferBlob. The rest of the memory
// will be freed when the ResourceObj is released.
for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
// Previous incarnations of this buffer are held live, so that internal
// addresses constructed before expansions will not be confused.
cb->free_blob();
@@ -171,7 +171,7 @@ void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
void CodeBuffer::set_blob(BufferBlob* blob) {
_blob = blob;
if (blob != NULL) {
if (blob != nullptr) {
address start = blob->content_begin();
address end = blob->content_end();
// Round up the starting address.
@@ -191,21 +191,21 @@ void CodeBuffer::set_blob(BufferBlob* blob) {
}
void CodeBuffer::free_blob() {
if (_blob != NULL) {
if (_blob != nullptr) {
BufferBlob::free(_blob);
set_blob(NULL);
set_blob(nullptr);
}
}
const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
return NULL;
return nullptr;
#else //PRODUCT
switch (n) {
case SECT_CONSTS: return "consts";
case SECT_INSTS: return "insts";
case SECT_STUBS: return "stubs";
default: return NULL;
default: return nullptr;
}
#endif //PRODUCT
}
@@ -236,14 +236,14 @@ bool CodeBuffer::is_backward_branch(Label& L) {
#ifndef PRODUCT
address CodeBuffer::decode_begin() {
address begin = _insts.start();
if (_decode_begin != NULL && _decode_begin > begin)
if (_decode_begin != nullptr && _decode_begin > begin)
begin = _decode_begin;
return begin;
}
#endif // !PRODUCT
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
if (_overflow_arena == nullptr) {
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
@@ -269,7 +269,7 @@ address CodeSection::target(Label& L, address branch_pc) {
// Need to return a pc, doesn't matter what it is since it will be
// replaced during resolution later.
// Don't return NULL or badAddress, since branches shouldn't overflow.
// Don't return null or badAddress, since branches shouldn't overflow.
// Don't return base either because that could overflow displacements
// for shorter branches. It will get checked when bound.
return branch_pc;
@@ -365,7 +365,7 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format)
}
void CodeSection::initialize_locs(int locs_capacity) {
assert(_locs_start == NULL, "only one locs init step, please");
assert(_locs_start == nullptr, "only one locs init step, please");
// Apply a priori lower limits to relocation size:
csize_t min_locs = MAX2(size() / 16, (csize_t)4);
if (locs_capacity < min_locs) locs_capacity = min_locs;
@@ -377,7 +377,7 @@ void CodeSection::initialize_locs(int locs_capacity) {
}
void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
assert(_locs_start == NULL, "do this before locs are allocated");
assert(_locs_start == nullptr, "do this before locs are allocated");
// Internal invariant: locs buf must be fully aligned.
// See copy_relocations_to() below.
while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
@@ -403,7 +403,7 @@ void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
}
void CodeSection::expand_locs(int new_capacity) {
if (_locs_start == NULL) {
if (_locs_start == nullptr) {
initialize_locs(new_capacity);
return;
} else {
@@ -468,8 +468,8 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
}
const CodeSection* prev_cs = NULL;
CodeSection* prev_dest_cs = NULL;
const CodeSection* prev_cs = nullptr;
CodeSection* prev_dest_cs = nullptr;
for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
// figure compact layout of each section
@@ -481,7 +481,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
// Compute initial padding; assign it to the previous non-empty guy.
// Cf. figure_expanded_capacities.
csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
if (prev_dest_cs != NULL) {
if (prev_dest_cs != nullptr) {
if (padding != 0) {
buf_offset += padding;
prev_dest_cs->_limit += padding;
@@ -493,7 +493,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
prev_cs = cs;
}
debug_only(dest_cs->_start = NULL); // defeat double-initialization assert
debug_only(dest_cs->_start = nullptr); // defeat double-initialization assert
dest_cs->initialize(buf+buf_offset, csize);
dest_cs->set_end(buf+buf_offset+csize);
assert(dest_cs->is_allocated(), "must always be allocated");
@@ -510,7 +510,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
oop cl = k->klass_holder();
if (cl != NULL && !oops->contains(cl)) {
if (cl != nullptr && !oops->contains(cl)) {
oops->append(cl);
}
}
@@ -613,7 +613,7 @@ int CodeBuffer::total_skipped_instructions_size() const {
}
csize_t CodeBuffer::total_relocation_size() const {
csize_t total = copy_relocations_to(NULL); // dry run only
csize_t total = copy_relocations_to(nullptr); // dry run only
return (csize_t) align_up(total, HeapWordSize);
}
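
This is the two-pass sizing idiom: the same routine measures when handed nullptr and copies when handed a real destination. Roughly:

    csize_t size = copy_relocations_to(nullptr);  // pass 1: count bytes, write nothing
    // ... allocate a CodeBlob whose relocation area holds size bytes ...
    copy_relocations_to(blob);                    // pass 2: actually copy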
@@ -656,7 +656,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
} else { // else shrink the filler to fit
filler = relocInfo(relocInfo::none, jump);
}
if (buf != NULL) {
if (buf != nullptr) {
assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
*(relocInfo*)(buf+buf_offset) = filler;
}
@@ -671,7 +671,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
code_end_so_far += csize; // advance past this guy's instructions too
// Done with filler; emit the real relocations:
if (buf != NULL && lsize != 0) {
if (buf != nullptr && lsize != 0) {
assert(buf_offset + lsize <= buf_limit, "target in bounds");
assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
if (buf_offset % HeapWordSize == 0) {
@@ -688,7 +688,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
// Align end of relocation info in target.
while (buf_offset % HeapWordSize != 0) {
if (buf != NULL) {
if (buf != nullptr) {
relocInfo padding = relocInfo(relocInfo::none, 0);
assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
*(relocInfo*)(buf+buf_offset) = padding;
@@ -702,15 +702,15 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
}
csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
address buf = NULL;
address buf = nullptr;
csize_t buf_offset = 0;
csize_t buf_limit = 0;
if (dest != NULL) {
if (dest != nullptr) {
buf = (address)dest->relocation_begin();
buf_limit = (address)dest->relocation_end() - buf;
}
// if dest == NULL, this is just the sizing pass
// if dest is null, this is just the sizing pass
//
buf_offset = copy_relocations_to(buf, buf_limit, false);
@@ -752,7 +752,7 @@ void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
address dest_end = dest->_total_start + dest->_total_size;
address dest_filled = NULL;
address dest_filled = nullptr;
for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
// pull code out of each section
const CodeSection* cs = code_section(n);
@@ -768,7 +768,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
(HeapWord*)dest_cs->start(),
wsize / HeapWordSize);
if (dest->blob() == NULL) {
if (dest->blob() == nullptr) {
// Destination is a final resting place, not just another buffer.
// Normalize uninitialized bytes in the final padding.
Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
@@ -802,7 +802,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
}
}
if (dest->blob() == NULL && dest_filled != NULL) {
if (dest->blob() == nullptr && dest_filled != nullptr) {
// Destination is a final resting place, not just another buffer.
// Normalize uninitialized bytes in the final padding.
Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
@@ -865,7 +865,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
this->print();
}
if (StressCodeBuffers && blob() != NULL) {
if (StressCodeBuffers && blob() != nullptr) {
static int expand_count = 0;
if (expand_count >= 0) expand_count += 1;
if (expand_count > 100 && is_power_of_2(expand_count)) {
@@ -878,7 +878,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
// Resizing must be allowed
{
if (blob() == NULL) return; // caller must check for blob == NULL
if (blob() == nullptr) return; // caller must check if blob is null
}
// Figure new capacity for each section.
@@ -889,7 +889,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
// Create a new (temporary) code buffer to hold all the new data
CodeBuffer cb(name(), new_total_cap, 0);
if (cb.blob() == NULL) {
if (cb.blob() == nullptr) {
// Failed to allocate in code cache.
free_blob();
return;
@@ -901,7 +901,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
// has been created at any time in this CodeBuffer's past.
CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
bxp->take_over_code_from(this); // remember the old undersized blob
DEBUG_ONLY(this->_blob = NULL); // silence a later assert
DEBUG_ONLY(this->_blob = nullptr); // silence a later assert
bxp->_before_expand = this->_before_expand;
this->_before_expand = bxp;
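
For orientation: each expansion pushes the old undersized buffer onto a chain instead of freeing it, so internal addresses taken before the expansion stay valid, and ~CodeBuffer (see the destructor hunk above) walks the chain to free every blob:

    current buffer -> _before_expand -> ... -> original buffer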
@@ -916,7 +916,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
assert(cb_sect->capacity() >= new_capacity[n], "big enough");
address cb_start = cb_sect->start();
cb_sect->set_end(cb_start + this_sect->size());
if (this_sect->mark() == NULL) {
if (this_sect->mark() == nullptr) {
cb_sect->clear_mark();
} else {
cb_sect->set_mark(cb_start + this_sect->mark_off());
@@ -932,7 +932,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
// Copy the temporary code buffer into the current code buffer.
// Basically, do {*this = cb}, except for some control information.
this->take_over_code_from(&cb);
cb.set_blob(NULL);
cb.set_blob(nullptr);
// Zap the old code buffer contents, to avoid mistakenly using them.
debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
@@ -942,7 +942,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
debug_only(verify_section_allocation();)
#ifndef PRODUCT
_decode_begin = NULL; // sanity
_decode_begin = nullptr; // sanity
if (PrintNMethods && (WizardMode || Verbose)) {
tty->print("expanded CodeBuffer:");
this->print();
@@ -952,7 +952,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
// Must already have disposed of the old blob somehow.
assert(blob() == NULL, "must be empty");
assert(blob() == nullptr, "must be empty");
// Take the new blob away from cb.
set_blob(cb->blob());
// Take over all the section pointers.
@@ -962,16 +962,16 @@ void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
this_sect->take_over_code_from(cb_sect);
}
_overflow_arena = cb->_overflow_arena;
cb->_overflow_arena = NULL;
cb->_overflow_arena = nullptr;
// Make sure the old cb won't try to use it or free it.
DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}
void CodeBuffer::verify_section_allocation() {
address tstart = _total_start;
if (tstart == badAddress) return; // smashed by set_blob(NULL)
if (tstart == badAddress) return; // smashed by set_blob(nullptr)
address tend = tstart + _total_size;
if (_blob != NULL) {
if (_blob != nullptr) {
guarantee(tstart >= _blob->content_begin(), "sanity");
guarantee(tend <= _blob->content_end(), "sanity");
}
@@ -996,7 +996,7 @@ void CodeBuffer::verify_section_allocation() {
}
void CodeBuffer::log_section_sizes(const char* name) {
if (xtty != NULL) {
if (xtty != nullptr) {
ttyLocker ttyl;
// log info about buffer usage
xtty->print_cr("<blob name='%s' total_size='%d'>", name, _total_size);
@@ -1020,7 +1020,7 @@ bool CodeBuffer::finalize_stubs() {
}
void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
if (_shared_stub_to_interp_requests == NULL) {
if (_shared_stub_to_interp_requests == nullptr) {
_shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
}
SharedStubToInterpRequest request(callee, call_offset);
@@ -1061,8 +1061,8 @@ void CodeSection::print(const char* name) {
}
void CodeBuffer::print() {
if (this == NULL) {
tty->print_cr("NULL CodeBuffer pointer");
if (this == nullptr) {
tty->print_cr("null CodeBuffer pointer");
return;
}
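
(Aside: a "this == nullptr" test is undefined behavior in standard C++, since a member function may only be invoked on a valid object, and an optimizing compiler is entitled to fold the check to false; this patch only renames the constant here.)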

src/hotspot/share/asm/codeBuffer.hpp

@@ -110,14 +110,14 @@ class CodeSection {
// (Note: _locs_point used to be called _last_reloc_offset.)
CodeSection() {
_start = NULL;
_mark = NULL;
_end = NULL;
_limit = NULL;
_locs_start = NULL;
_locs_end = NULL;
_locs_limit = NULL;
_locs_point = NULL;
_start = nullptr;
_mark = nullptr;
_end = nullptr;
_limit = nullptr;
_locs_start = nullptr;
_locs_end = nullptr;
_locs_limit = nullptr;
_locs_point = nullptr;
_locs_own = false;
_scratch_emit = false;
_skipped_instructions_size = 0;
@@ -131,9 +131,9 @@
}
void initialize(address start, csize_t size = 0) {
assert(_start == NULL, "only one init step, please");
assert(_start == nullptr, "only one init step, please");
_start = start;
_mark = NULL;
_mark = nullptr;
_end = start;
_limit = start + size;
@@ -160,7 +160,7 @@
address end() const { return _end; }
address limit() const { return _limit; }
csize_t size() const { return (csize_t)(_end - _start); }
csize_t mark_off() const { assert(_mark != NULL, "not an offset");
csize_t mark_off() const { assert(_mark != nullptr, "not an offset");
return (csize_t)(_mark - _start); }
csize_t capacity() const { return (csize_t)(_limit - _start); }
csize_t remaining() const { return (csize_t)(_limit - _end); }
@@ -174,9 +174,9 @@
csize_t locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
int index() const { return _index; }
bool is_allocated() const { return _start != NULL; }
bool is_allocated() const { return _start != nullptr; }
bool is_empty() const { return _start == _end; }
bool has_locs() const { return _locs_end != NULL; }
bool has_locs() const { return _locs_end != nullptr; }
// Mark scratch buffer.
void set_scratch_emit() { _scratch_emit = true; }
@@ -201,7 +201,7 @@ class CodeSection {
void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
_mark = pc; }
void set_mark() { _mark = _end; }
void clear_mark() { _mark = NULL; }
void clear_mark() { _mark = nullptr; }
void set_locs_end(relocInfo* p) {
assert(p <= locs_limit(), "locs data fits in allocated buffer");
@@ -450,16 +450,16 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
void initialize_misc(const char * name) {
// all pointers other than code_start/end and those inside the sections
assert(name != NULL, "must have a name");
assert(name != nullptr, "must have a name");
_name = name;
_before_expand = NULL;
_blob = NULL;
_oop_recorder = NULL;
_overflow_arena = NULL;
_last_insn = NULL;
_before_expand = nullptr;
_blob = nullptr;
_oop_recorder = nullptr;
_overflow_arena = nullptr;
_last_insn = nullptr;
_finalize_stubs = false;
_shared_stub_to_interp_requests = NULL;
_shared_trampoline_requests = NULL;
_shared_stub_to_interp_requests = nullptr;
_shared_trampoline_requests = nullptr;
_consts.initialize_outer(this, SECT_CONSTS);
_insts.initialize_outer(this, SECT_INSTS);
@@ -470,7 +470,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
_const_section_alignment = (int) sizeof(jdouble);
#ifndef PRODUCT
_decode_begin = NULL;
_decode_begin = nullptr;
// Collect block comments, but restrict collection to cases where a disassembly is output.
_collect_comments = ( PrintAssembly
|| PrintStubCode
@@ -525,7 +525,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
CodeBuffer(address code_start, csize_t code_size)
DEBUG_ONLY(: Scrubber(this, sizeof(*this)))
{
assert(code_start != NULL, "sanity");
assert(code_start != nullptr, "sanity");
initialize_misc("static buffer");
initialize(code_start, code_size);
debug_only(verify_section_allocation();)
@@ -567,7 +567,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
const CodeSection* insts() const { return &_insts; }
// present sections in order; return NULL at end; consts is #0, etc.
// present sections in order; return null at end; consts is #0, etc.
CodeSection* code_section(int n) {
// This makes the slightly questionable but portable assumption
// that the various members (_consts, _insts, _stubs, etc.) are
@@ -592,7 +592,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
int locator(address addr) const;
address locator_address(int locator) const {
if (locator < 0) return NULL;
if (locator < 0) return nullptr;
address start = code_section(locator_sect(locator))->start();
return start + locator_pos(locator);
}
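
A worked example of the locator encoding, assuming sect_bits == 2 (two bits suffice for SECT_LIMIT == 3; the real constant is defined elsewhere in this header):

    // locator(0x40, SECT_INSTS /* == 1 */) == (0x40 << 2) | 1 == 0x101
    // locator_pos(0x101)  == 0x101 >> 2 == 0x40
    // locator_sect(0x101) == 0x101 & 3  == SECT_INSTS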
@@ -656,13 +656,13 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
// allocated size of any and all recorded oops
csize_t total_oop_size() const {
OopRecorder* recorder = oop_recorder();
return (recorder == NULL)? 0: recorder->oop_size();
return (recorder == nullptr)? 0: recorder->oop_size();
}
// allocated size of any and all recorded metadata
csize_t total_metadata_size() const {
OopRecorder* recorder = oop_recorder();
return (recorder == NULL)? 0: recorder->metadata_size();
return (recorder == nullptr)? 0: recorder->metadata_size();
}
// Configuration functions, called immediately after the CB is constructed.
@ -677,7 +677,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
address last_insn() const { return _last_insn; }
void set_last_insn(address a) { _last_insn = a; }
void clear_last_insn() { set_last_insn(NULL); }
void clear_last_insn() { set_last_insn(nullptr); }
#ifndef PRODUCT
AsmRemarks &asm_remarks() { return _asm_remarks; }
@@ -702,7 +702,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
// NMethod generation
void copy_code_and_locs_to(CodeBlob* blob) {
assert(blob != NULL, "sane");
assert(blob != nullptr, "sane");
copy_relocations_to(blob);
copy_code_to(blob);
}
@@ -713,7 +713,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
}
void block_comment(ptrdiff_t offset, const char* comment) PRODUCT_RETURN;
const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);
const char* code_string(const char* str) PRODUCT_RETURN_(return nullptr;);
// Log a little info about section usage in the CodeBuffer
void log_section_sizes(const char* name);
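
(For reference, hedged: PRODUCT_RETURN_ is a HotSpot debug macro; in PRODUCT builds the declaration above expands to an inline body, roughly "const char* code_string(const char* str) { return nullptr; }", while non-PRODUCT builds get only a declaration whose definition lives in codeBuffer.cpp.)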
@@ -753,7 +753,7 @@ class SharedStubToInterpRequest : public ResourceObj {
CodeBuffer::csize_t _call_offset; // The offset of the call in CodeBuffer
public:
SharedStubToInterpRequest(ciMethod* method = NULL, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
SharedStubToInterpRequest(ciMethod* method = nullptr, CodeBuffer::csize_t call_offset = -1) : _shared_method(method),
_call_offset(call_offset) {}
ciMethod* shared_method() const { return _shared_method; }

src/hotspot/share/asm/codeBuffer.inline.hpp

@@ -33,7 +33,7 @@
template <typename MacroAssembler, int relocate_format = 0>
bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* shared_stub_to_interp_requests) {
if (shared_stub_to_interp_requests == NULL) {
if (shared_stub_to_interp_requests == nullptr) {
return true;
}
auto by_shared_method = [](SharedStubToInterpRequest* r1, SharedStubToInterpRequest* r2) {
@@ -49,7 +49,7 @@ bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* sha
MacroAssembler masm(cb);
for (int i = 0; i < shared_stub_to_interp_requests->length();) {
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
if (stub == NULL) {
if (stub == nullptr) {
return false;
}
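
The rest of the function is cut off by the hunk; its shape, roughly and simplified (the real emitter is platform-specific): the sort above groups requests by callee, one to-interpreter stub is emitted per group, and every grouped call site gets a relocation pointing at it:

    ciMethod* method = shared_stub_to_interp_requests->at(i).shared_method();
    do {
      // anchor this call site's static-stub relocation to the shared stub
      ++i;
    } while (i < shared_stub_to_interp_requests->length() &&
             shared_stub_to_interp_requests->at(i).shared_method() == method);
    // ... emit the single stub body, then end_a_stub() ...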